Diffstat (limited to 'drivers/net/ethernet/qlogic')
-rw-r--r--  drivers/net/ethernet/qlogic/Kconfig                        |   3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/Makefile                   |   5
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h                      |  14
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c                  |  45
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.h                  |   2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c                  | 140
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h                  |   6
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iscsi.c                |  22
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.c                  |  40
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c                  |   3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c              |   3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_nvmetcp.c              | 829
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_nvmetcp.h              | 103
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c     | 376
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.h     |  39
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c  | 238
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ooo.c                  |   5
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp.h                   |   5
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp_commands.c          |   3
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_fp.c                 |   6
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_rdma.c               |   6
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c      |   2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c      |   2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c             |   8
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h             |   1
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c           |   7
26 files changed, 1837 insertions(+), 76 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 6b5ddb07ee83..98f430905ffa 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -110,6 +110,9 @@ config QED_RDMA
config QED_ISCSI
bool
+config QED_NVMETCP
+ bool
+
config QED_FCOE
bool
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index 8251755ec18c..0d9c2fe0245d 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -28,6 +28,11 @@ qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o
qed-$(CONFIG_QED_LL2) += qed_ll2.o
qed-$(CONFIG_QED_OOO) += qed_ooo.o
+qed-$(CONFIG_QED_NVMETCP) += \
+ qed_nvmetcp.o \
+ qed_nvmetcp_fw_funcs.o \
+ qed_nvmetcp_ip_services.o
+
qed-$(CONFIG_QED_RDMA) += \
qed_iwarp.o \
qed_rdma.o \
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index a20cb8a0c377..b590c70539b5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -49,6 +49,8 @@ extern const struct qed_common_ops qed_common_ops_pass;
#define QED_MIN_WIDS (4)
#define QED_PF_DEMS_SIZE (4)
+#define QED_LLH_DONT_CARE 0
+
/* cau states */
enum qed_coalescing_mode {
QED_COAL_MODE_DISABLE,
@@ -200,6 +202,7 @@ enum qed_pci_personality {
QED_PCI_ETH,
QED_PCI_FCOE,
QED_PCI_ISCSI,
+ QED_PCI_NVMETCP,
QED_PCI_ETH_ROCE,
QED_PCI_ETH_IWARP,
QED_PCI_ETH_RDMA,
@@ -239,6 +242,7 @@ enum QED_FEATURE {
QED_PF_L2_QUE,
QED_VF,
QED_RDMA_CNQ,
+ QED_NVMETCP_CQ,
QED_ISCSI_CQ,
QED_FCOE_CQ,
QED_VF_L2_QUE,
@@ -284,6 +288,8 @@ struct qed_hw_info {
((dev)->hw_info.personality == QED_PCI_FCOE)
#define QED_IS_ISCSI_PERSONALITY(dev) \
((dev)->hw_info.personality == QED_PCI_ISCSI)
+#define QED_IS_NVMETCP_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == QED_PCI_NVMETCP)
/* Resource Allocation scheme results */
u32 resc_start[QED_MAX_RESC];
@@ -592,6 +598,7 @@ struct qed_hwfn {
struct qed_ooo_info *p_ooo_info;
struct qed_rdma_info *p_rdma_info;
struct qed_iscsi_info *p_iscsi_info;
+ struct qed_nvmetcp_info *p_nvmetcp_info;
struct qed_fcoe_info *p_fcoe_info;
struct qed_pf_params pf_params;
@@ -828,6 +835,7 @@ struct qed_dev {
struct qed_eth_cb_ops *eth;
struct qed_fcoe_cb_ops *fcoe;
struct qed_iscsi_cb_ops *iscsi;
+ struct qed_nvmetcp_cb_ops *nvmetcp;
} protocol_ops;
void *ops_cookie;
@@ -999,4 +1007,10 @@ int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn,
void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc);
void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn);
+
+int qed_llh_add_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port);
+int qed_llh_add_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port);
+void qed_llh_remove_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port);
+void qed_llh_remove_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port);
+void qed_llh_clear_all_filters(struct qed_dev *cdev);
#endif /* _QED_H */
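The five prototypes added above form the LLH TCP port filter API that a dependent NVMe/TCP offload driver is expected to consume. A minimal caller sketch, assuming a hypothetical qedn-style driver; the function names and the use of the IANA NVMe/TCP port 4420 are illustrative, not part of this patch:

    /* Hypothetical consumer: steer traffic for the NVMe/TCP well-known port
     * to this PF on setup, and drop the rule again on teardown. */
    static int example_install_nvmetcp_filter(struct qed_dev *cdev)
    {
            return qed_llh_add_dst_tcp_port_filter(cdev, 4420);
    }

    static void example_remove_nvmetcp_filter(struct qed_dev *cdev)
    {
            qed_llh_remove_dst_tcp_port_filter(cdev, 4420);
    }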
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 0a22f8ce9a2c..5a0a3cbcc1c1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -94,14 +94,14 @@ struct src_ent {
static bool src_proto(enum protocol_type type)
{
- return type == PROTOCOLID_ISCSI ||
+ return type == PROTOCOLID_TCP_ULP ||
type == PROTOCOLID_FCOE ||
type == PROTOCOLID_IWARP;
}
static bool tm_cid_proto(enum protocol_type type)
{
- return type == PROTOCOLID_ISCSI ||
+ return type == PROTOCOLID_TCP_ULP ||
type == PROTOCOLID_FCOE ||
type == PROTOCOLID_ROCE ||
type == PROTOCOLID_IWARP;
@@ -2072,7 +2072,6 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
PROTOCOLID_FCOE,
p_params->num_cons,
0);
-
qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_FCOE,
QED_CXT_FCOE_TID_SEG, 0,
p_params->num_tasks, true);
@@ -2090,13 +2089,12 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
if (p_params->num_cons && p_params->num_tasks) {
qed_cxt_set_proto_cid_count(p_hwfn,
- PROTOCOLID_ISCSI,
+ PROTOCOLID_TCP_ULP,
p_params->num_cons,
0);
-
qed_cxt_set_proto_tid_count(p_hwfn,
- PROTOCOLID_ISCSI,
- QED_CXT_ISCSI_TID_SEG,
+ PROTOCOLID_TCP_ULP,
+ QED_CXT_TCP_ULP_TID_SEG,
0,
p_params->num_tasks,
true);
@@ -2106,6 +2104,29 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
}
break;
}
+ case QED_PCI_NVMETCP:
+ {
+ struct qed_nvmetcp_pf_params *p_params;
+
+ p_params = &p_hwfn->pf_params.nvmetcp_pf_params;
+
+ if (p_params->num_cons && p_params->num_tasks) {
+ qed_cxt_set_proto_cid_count(p_hwfn,
+ PROTOCOLID_TCP_ULP,
+ p_params->num_cons,
+ 0);
+ qed_cxt_set_proto_tid_count(p_hwfn,
+ PROTOCOLID_TCP_ULP,
+ QED_CXT_TCP_ULP_TID_SEG,
+ 0,
+ p_params->num_tasks,
+ true);
+ } else {
+ DP_INFO(p_hwfn->cdev,
+ "NvmeTCP personality used without setting params!\n");
+ }
+ break;
+ }
default:
return -EINVAL;
}
@@ -2129,8 +2150,9 @@ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
seg = QED_CXT_FCOE_TID_SEG;
break;
case QED_PCI_ISCSI:
- proto = PROTOCOLID_ISCSI;
- seg = QED_CXT_ISCSI_TID_SEG;
+ case QED_PCI_NVMETCP:
+ proto = PROTOCOLID_TCP_ULP;
+ seg = QED_CXT_TCP_ULP_TID_SEG;
break;
default:
return -EINVAL;
@@ -2455,8 +2477,9 @@ int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
seg = QED_CXT_FCOE_TID_SEG;
break;
case QED_PCI_ISCSI:
- proto = PROTOCOLID_ISCSI;
- seg = QED_CXT_ISCSI_TID_SEG;
+ case QED_PCI_NVMETCP:
+ proto = PROTOCOLID_TCP_ULP;
+ seg = QED_CXT_TCP_ULP_TID_SEG;
break;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 056e79620a0e..8adb7ed0c12d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -50,7 +50,7 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
struct qed_tid_mem *p_info);
-#define QED_CXT_ISCSI_TID_SEG PROTOCOLID_ISCSI
+#define QED_CXT_TCP_ULP_TID_SEG PROTOCOLID_TCP_ULP
#define QED_CXT_ROCE_TID_SEG PROTOCOLID_ROCE
#define QED_CXT_FCOE_TID_SEG PROTOCOLID_FCOE
enum qed_cxt_elem_type {
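The ISCSI-to-TCP_ULP rename means the iSCSI and NVMeTCP personalities now draw from one shared context pool. Both qed_cxt_get_tid_mem_info() and qed_cxt_get_task_ctx() apply the same personality-to-(protocol, TID segment) mapping; a hypothetical helper capturing that mapping, for illustration only:

    static int example_personality_to_tid_seg(enum qed_pci_personality pers,
                                              enum protocol_type *proto,
                                              u32 *seg)
    {
            switch (pers) {
            case QED_PCI_FCOE:
                    *proto = PROTOCOLID_FCOE;
                    *seg = QED_CXT_FCOE_TID_SEG;
                    return 0;
            case QED_PCI_ISCSI:
            case QED_PCI_NVMETCP:
                    *proto = PROTOCOLID_TCP_ULP; /* shared TCP ULP resources */
                    *seg = QED_CXT_TCP_ULP_TID_SEG;
                    return 0;
            default:
                    return -EINVAL;
            }
    }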
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index d2f5855b2ea7..0410c3604abd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -37,6 +37,7 @@
#include "qed_sriov.h"
#include "qed_vf.h"
#include "qed_rdma.h"
+#include "qed_nvmetcp.h"
static DEFINE_SPINLOCK(qm_lock);
@@ -667,7 +668,8 @@ qed_llh_set_engine_affin(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
}
/* Storage PF is bound to a single engine while L2 PF uses both */
- if (QED_IS_FCOE_PERSONALITY(p_hwfn) || QED_IS_ISCSI_PERSONALITY(p_hwfn))
+ if (QED_IS_FCOE_PERSONALITY(p_hwfn) || QED_IS_ISCSI_PERSONALITY(p_hwfn) ||
+ QED_IS_NVMETCP_PERSONALITY(p_hwfn))
eng = cdev->fir_affin ? QED_ENG1 : QED_ENG0;
else /* L2_PERSONALITY */
eng = QED_BOTH_ENG;
@@ -1164,6 +1166,9 @@ void qed_llh_remove_mac_filter(struct qed_dev *cdev,
if (!test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits))
goto out;
+ if (QED_IS_NVMETCP_PERSONALITY(p_hwfn))
+ return;
+
ether_addr_copy(filter.mac.addr, mac_addr);
rc = qed_llh_shadow_remove_filter(cdev, ppfid, &filter, &filter_idx,
&ref_cnt);
@@ -1381,6 +1386,11 @@ void qed_resc_free(struct qed_dev *cdev)
qed_ooo_free(p_hwfn);
}
+ if (p_hwfn->hw_info.personality == QED_PCI_NVMETCP) {
+ qed_nvmetcp_free(p_hwfn);
+ qed_ooo_free(p_hwfn);
+ }
+
if (QED_IS_RDMA_PERSONALITY(p_hwfn) && rdma_info) {
qed_spq_unregister_async_cb(p_hwfn, rdma_info->proto);
qed_rdma_info_free(p_hwfn);
@@ -1423,6 +1433,7 @@ static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn)
flags |= PQ_FLAGS_OFLD;
break;
case QED_PCI_ISCSI:
+ case QED_PCI_NVMETCP:
flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
break;
case QED_PCI_ETH_ROCE:
@@ -2263,10 +2274,11 @@ int qed_resc_alloc(struct qed_dev *cdev)
* at the same time
*/
n_eqes += num_cons + 2 * MAX_NUM_VFS_BB + n_srq;
- } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
+ } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI ||
+ p_hwfn->hw_info.personality == QED_PCI_NVMETCP) {
num_cons =
qed_cxt_get_proto_cid_count(p_hwfn,
- PROTOCOLID_ISCSI,
+ PROTOCOLID_TCP_ULP,
NULL);
n_eqes += 2 * num_cons;
}
@@ -2313,6 +2325,15 @@ int qed_resc_alloc(struct qed_dev *cdev)
goto alloc_err;
}
+ if (p_hwfn->hw_info.personality == QED_PCI_NVMETCP) {
+ rc = qed_nvmetcp_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+ rc = qed_ooo_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+ }
+
if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
rc = qed_rdma_info_alloc(p_hwfn);
if (rc)
@@ -2393,6 +2414,11 @@ void qed_resc_setup(struct qed_dev *cdev)
qed_iscsi_setup(p_hwfn);
qed_ooo_setup(p_hwfn);
}
+
+ if (p_hwfn->hw_info.personality == QED_PCI_NVMETCP) {
+ qed_nvmetcp_setup(p_hwfn);
+ qed_ooo_setup(p_hwfn);
+ }
}
}
@@ -2854,7 +2880,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
/* Protocol Configuration */
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
- (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
+ ((p_hwfn->hw_info.personality == QED_PCI_ISCSI) ||
+ (p_hwfn->hw_info.personality == QED_PCI_NVMETCP)) ? 1 : 0);
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
(p_hwfn->hw_info.personality == QED_PCI_FCOE) ? 1 : 0);
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
@@ -3535,14 +3562,21 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
feat_num[QED_ISCSI_CQ] = min_t(u32, sb_cnt.cnt,
RESC_NUM(p_hwfn,
QED_CMDQS_CQS));
+
+ if (QED_IS_NVMETCP_PERSONALITY(p_hwfn))
+ feat_num[QED_NVMETCP_CQ] = min_t(u32, sb_cnt.cnt,
+ RESC_NUM(p_hwfn,
+ QED_CMDQS_CQS));
+
DP_VERBOSE(p_hwfn,
NETIF_MSG_PROBE,
- "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d FCOE_CQ=%d ISCSI_CQ=%d #SBS=%d\n",
+ "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d FCOE_CQ=%d ISCSI_CQ=%d NVMETCP_CQ=%d #SBS=%d\n",
(int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE),
(int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE),
(int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ),
(int)FEAT_NUM(p_hwfn, QED_FCOE_CQ),
(int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ),
+ (int)FEAT_NUM(p_hwfn, QED_NVMETCP_CQ),
(int)sb_cnt.cnt);
}
@@ -3734,7 +3768,8 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
break;
case QED_BDQ:
if (p_hwfn->hw_info.personality != QED_PCI_ISCSI &&
- p_hwfn->hw_info.personality != QED_PCI_FCOE)
+ p_hwfn->hw_info.personality != QED_PCI_FCOE &&
+ p_hwfn->hw_info.personality != QED_PCI_NVMETCP)
*p_resc_num = 0;
else
*p_resc_num = 1;
@@ -3755,7 +3790,8 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
*p_resc_start = 0;
else if (p_hwfn->cdev->num_ports_in_engine == 4)
*p_resc_start = p_hwfn->port_id;
- else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
+ else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI ||
+ p_hwfn->hw_info.personality == QED_PCI_NVMETCP)
*p_resc_start = p_hwfn->port_id;
else if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
*p_resc_start = p_hwfn->port_id + 2;
@@ -5326,3 +5362,93 @@ void qed_set_fw_mac_addr(__le16 *fw_msb,
((u8 *)fw_lsb)[0] = mac[5];
((u8 *)fw_lsb)[1] = mac[4];
}
+
+static int qed_llh_shadow_remove_all_filters(struct qed_dev *cdev, u8 ppfid)
+{
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
+ struct qed_llh_filter_info *p_filters;
+ int rc;
+
+ rc = qed_llh_shadow_sanity(cdev, ppfid, 0, "remove_all");
+ if (rc)
+ return rc;
+
+ p_filters = p_llh_info->pp_filters[ppfid];
+ memset(p_filters, 0, NIG_REG_LLH_FUNC_FILTER_EN_SIZE *
+ sizeof(*p_filters));
+
+ return 0;
+}
+
+static void qed_llh_clear_ppfid_filters(struct qed_dev *cdev, u8 ppfid)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ u8 filter_idx, abs_ppfid;
+ int rc = 0;
+
+ if (!p_ptt)
+ return;
+
+ if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits) &&
+ !test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits))
+ goto out;
+
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
+ if (rc)
+ goto out;
+
+ rc = qed_llh_shadow_remove_all_filters(cdev, ppfid);
+ if (rc)
+ goto out;
+
+ for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE;
+ filter_idx++) {
+ rc = qed_llh_remove_filter(p_hwfn, p_ptt,
+ abs_ppfid, filter_idx);
+ if (rc)
+ goto out;
+ }
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+}
+
+int qed_llh_add_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port)
+{
+ return qed_llh_add_protocol_filter(cdev, 0,
+ QED_LLH_FILTER_TCP_SRC_PORT,
+ src_port, QED_LLH_DONT_CARE);
+}
+
+void qed_llh_remove_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port)
+{
+ qed_llh_remove_protocol_filter(cdev, 0,
+ QED_LLH_FILTER_TCP_SRC_PORT,
+ src_port, QED_LLH_DONT_CARE);
+}
+
+int qed_llh_add_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port)
+{
+ return qed_llh_add_protocol_filter(cdev, 0,
+ QED_LLH_FILTER_TCP_DEST_PORT,
+ QED_LLH_DONT_CARE, dest_port);
+}
+
+void qed_llh_remove_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port)
+{
+ qed_llh_remove_protocol_filter(cdev, 0,
+ QED_LLH_FILTER_TCP_DEST_PORT,
+ QED_LLH_DONT_CARE, dest_port);
+}
+
+void qed_llh_clear_all_filters(struct qed_dev *cdev)
+{
+ u8 ppfid;
+
+ if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits) &&
+ !test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits))
+ return;
+
+ for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++)
+ qed_llh_clear_ppfid_filters(cdev, ppfid);
+}
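The wrappers above pass QED_LLH_DONT_CARE for the unused half of the (source, destination) tuple. A sketch of the equivalent direct call, assuming the existing qed_llh_add_protocol_filter() signature in qed_dev.c and an illustrative port number:

    /* A destination-port-only rule on the default ppfid; equivalent to
     * qed_llh_add_dst_tcp_port_filter(cdev, 4420). */
    rc = qed_llh_add_protocol_filter(cdev, 0 /* ppfid */,
                                     QED_LLH_FILTER_TCP_DEST_PORT,
                                     QED_LLH_DONT_CARE /* src */,
                                     4420 /* dst */);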
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 559df9f4d656..fb1baa2da2d0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -20,6 +20,7 @@
#include <linux/qed/fcoe_common.h>
#include <linux/qed/eth_common.h>
#include <linux/qed/iscsi_common.h>
+#include <linux/qed/nvmetcp_common.h>
#include <linux/qed/iwarp_common.h>
#include <linux/qed/rdma_common.h>
#include <linux/qed/roce_common.h>
@@ -1118,7 +1119,7 @@ struct outer_tag_config_struct {
/* personality per PF */
enum personality_type {
BAD_PERSONALITY_TYP,
- PERSONALITY_ISCSI,
+ PERSONALITY_TCP_ULP,
PERSONALITY_FCOE,
PERSONALITY_RDMA_AND_ETH,
PERSONALITY_RDMA,
@@ -12147,7 +12148,8 @@ struct public_func {
#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010
#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020
#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030
-#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030
+#define FUNC_MF_CFG_PROTOCOL_NVMETCP 0x00000040
+#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000040
#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00
#define FUNC_MF_CFG_MIN_BW_SHIFT 8
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 4eae4ee3538f..db926d8b3033 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -158,7 +158,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
rc = qed_sp_init_request(p_hwfn, &p_ent,
ISCSI_RAMROD_CMD_ID_INIT_FUNC,
- PROTOCOLID_ISCSI, &init_data);
+ PROTOCOLID_TCP_ULP, &init_data);
if (rc)
return rc;
@@ -250,7 +250,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_hwfn->p_iscsi_info->event_context = event_context;
p_hwfn->p_iscsi_info->event_cb = async_event_cb;
- qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ISCSI,
+ qed_spq_register_async_cb(p_hwfn, PROTOCOLID_TCP_ULP,
qed_iscsi_async_event);
return qed_spq_post(p_hwfn, p_ent, NULL);
@@ -286,7 +286,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
rc = qed_sp_init_request(p_hwfn, &p_ent,
ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN,
- PROTOCOLID_ISCSI, &init_data);
+ PROTOCOLID_TCP_ULP, &init_data);
if (rc)
return rc;
@@ -453,7 +453,7 @@ static int qed_sp_iscsi_conn_update(struct qed_hwfn *p_hwfn,
struct iscsi_conn_update_ramrod_params *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- int rc = -EINVAL;
+ int rc;
u32 dval;
/* Get SPQ entry */
@@ -465,7 +465,7 @@ static int qed_sp_iscsi_conn_update(struct qed_hwfn *p_hwfn,
rc = qed_sp_init_request(p_hwfn, &p_ent,
ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
- PROTOCOLID_ISCSI, &init_data);
+ PROTOCOLID_TCP_ULP, &init_data);
if (rc)
return rc;
@@ -506,7 +506,7 @@ qed_sp_iscsi_mac_update(struct qed_hwfn *p_hwfn,
rc = qed_sp_init_request(p_hwfn, &p_ent,
ISCSI_RAMROD_CMD_ID_MAC_UPDATE,
- PROTOCOLID_ISCSI, &init_data);
+ PROTOCOLID_TCP_ULP, &init_data);
if (rc)
return rc;
@@ -548,7 +548,7 @@ static int qed_sp_iscsi_conn_terminate(struct qed_hwfn *p_hwfn,
rc = qed_sp_init_request(p_hwfn, &p_ent,
ISCSI_RAMROD_CMD_ID_TERMINATION_CONN,
- PROTOCOLID_ISCSI, &init_data);
+ PROTOCOLID_TCP_ULP, &init_data);
if (rc)
return rc;
@@ -582,7 +582,7 @@ static int qed_sp_iscsi_conn_clear_sq(struct qed_hwfn *p_hwfn,
rc = qed_sp_init_request(p_hwfn, &p_ent,
ISCSI_RAMROD_CMD_ID_CLEAR_SQ,
- PROTOCOLID_ISCSI, &init_data);
+ PROTOCOLID_TCP_ULP, &init_data);
if (rc)
return rc;
@@ -606,13 +606,13 @@ static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn,
rc = qed_sp_init_request(p_hwfn, &p_ent,
ISCSI_RAMROD_CMD_ID_DESTROY_FUNC,
- PROTOCOLID_ISCSI, &init_data);
+ PROTOCOLID_TCP_ULP, &init_data);
if (rc)
return rc;
rc = qed_spq_post(p_hwfn, p_ent, NULL);
- qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ISCSI);
+ qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_TCP_ULP);
return rc;
}
@@ -786,7 +786,7 @@ static int qed_iscsi_acquire_connection(struct qed_hwfn *p_hwfn,
u32 icid;
spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
- rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ISCSI, &icid);
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_TCP_ULP, &icid);
spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
if (rc)
return rc;
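With both ULPs keyed to PROTOCOLID_TCP_ULP, a PF registers exactly one SPQ async callback for that slot, and the personality decides which one. The registration happens inside the respective func-start ramrods (qed_sp_iscsi_func_start() and qed_sp_nvmetcp_func_start()); the following condensed dispatch is illustrative only:

    if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
            qed_spq_register_async_cb(p_hwfn, PROTOCOLID_TCP_ULP,
                                      qed_iscsi_async_event);
    else if (p_hwfn->hw_info.personality == QED_PCI_NVMETCP)
            qed_spq_register_async_cb(p_hwfn, PROTOCOLID_TCP_ULP,
                                      qed_nvmetcp_async_event);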
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 49783f365079..02a4610d9330 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -960,7 +960,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
if (test_bit(QED_MF_LL2_NON_UNICAST, &p_hwfn->cdev->mf_bits) &&
p_ramrod->main_func_queue && conn_type != QED_LL2_TYPE_ROCE &&
- conn_type != QED_LL2_TYPE_IWARP) {
+ conn_type != QED_LL2_TYPE_IWARP &&
+ (!QED_IS_NVMETCP_PERSONALITY(p_hwfn))) {
p_ramrod->mf_si_bcast_accept_all = 1;
p_ramrod->mf_si_mcast_accept_all = 1;
} else {
@@ -1037,8 +1038,8 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
case QED_LL2_TYPE_FCOE:
p_ramrod->conn_type = PROTOCOLID_FCOE;
break;
- case QED_LL2_TYPE_ISCSI:
- p_ramrod->conn_type = PROTOCOLID_ISCSI;
+ case QED_LL2_TYPE_TCP_ULP:
+ p_ramrod->conn_type = PROTOCOLID_TCP_ULP;
break;
case QED_LL2_TYPE_ROCE:
p_ramrod->conn_type = PROTOCOLID_ROCE;
@@ -1047,8 +1048,9 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->conn_type = PROTOCOLID_IWARP;
break;
case QED_LL2_TYPE_OOO:
- if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
- p_ramrod->conn_type = PROTOCOLID_ISCSI;
+ if (p_hwfn->hw_info.personality == QED_PCI_ISCSI ||
+ p_hwfn->hw_info.personality == QED_PCI_NVMETCP)
+ p_ramrod->conn_type = PROTOCOLID_TCP_ULP;
else
p_ramrod->conn_type = PROTOCOLID_IWARP;
break;
@@ -1634,7 +1636,8 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
if (rc)
goto out;
- if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
+ if (!QED_IS_RDMA_PERSONALITY(p_hwfn) &&
+ !QED_IS_NVMETCP_PERSONALITY(p_hwfn))
qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);
qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
@@ -2376,7 +2379,8 @@ out:
static bool qed_ll2_is_storage_eng1(struct qed_dev *cdev)
{
return (QED_IS_FCOE_PERSONALITY(QED_LEADING_HWFN(cdev)) ||
- QED_IS_ISCSI_PERSONALITY(QED_LEADING_HWFN(cdev))) &&
+ QED_IS_ISCSI_PERSONALITY(QED_LEADING_HWFN(cdev)) ||
+ QED_IS_NVMETCP_PERSONALITY(QED_LEADING_HWFN(cdev))) &&
(QED_AFFIN_HWFN(cdev) != QED_LEADING_HWFN(cdev));
}
@@ -2402,11 +2406,13 @@ static int qed_ll2_stop(struct qed_dev *cdev)
if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
return 0;
+ if (!QED_IS_NVMETCP_PERSONALITY(p_hwfn))
+ qed_llh_remove_mac_filter(cdev, 0, cdev->ll2_mac_address);
- qed_llh_remove_mac_filter(cdev, 0, cdev->ll2_mac_address);
eth_zero_addr(cdev->ll2_mac_address);
- if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
+ if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn))
qed_ll2_stop_ooo(p_hwfn);
/* In CMT mode, LL2 is always started on engine 0 for a storage PF */
@@ -2442,7 +2448,8 @@ static int __qed_ll2_start(struct qed_hwfn *p_hwfn,
conn_type = QED_LL2_TYPE_FCOE;
break;
case QED_PCI_ISCSI:
- conn_type = QED_LL2_TYPE_ISCSI;
+ case QED_PCI_NVMETCP:
+ conn_type = QED_LL2_TYPE_TCP_ULP;
break;
case QED_PCI_ETH_ROCE:
conn_type = QED_LL2_TYPE_ROCE;
@@ -2567,7 +2574,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
}
}
- if (QED_IS_ISCSI_PERSONALITY(p_hwfn)) {
+ if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn)) {
DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
rc = qed_ll2_start_ooo(p_hwfn, params);
if (rc) {
@@ -2576,10 +2583,13 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
}
}
- rc = qed_llh_add_mac_filter(cdev, 0, params->ll2_mac_address);
- if (rc) {
- DP_NOTICE(cdev, "Failed to add an LLH filter\n");
- goto err3;
+ if (!QED_IS_NVMETCP_PERSONALITY(p_hwfn)) {
+ rc = qed_llh_add_mac_filter(cdev, 0, params->ll2_mac_address);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to add an LLH filter\n");
+ goto err3;
+ }
+
}
ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
@@ -2587,7 +2597,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
return 0;
err3:
- if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
+ if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn))
qed_ll2_stop_ooo(p_hwfn);
err2:
if (b_is_storage_eng1)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index cd882c453394..4387292c37e2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -2446,6 +2446,9 @@ qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
case FUNC_MF_CFG_PROTOCOL_ISCSI:
*p_proto = QED_PCI_ISCSI;
break;
+ case FUNC_MF_CFG_PROTOCOL_NVMETCP:
+ *p_proto = QED_PCI_NVMETCP;
+ break;
case FUNC_MF_CFG_PROTOCOL_FCOE:
*p_proto = QED_PCI_FCOE;
break;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
index 3e3192a3ad9b..6190adf965bc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
@@ -1306,7 +1306,8 @@ int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
}
if ((tlv_group & QED_MFW_TLV_ISCSI) &&
- p_hwfn->hw_info.personality != QED_PCI_ISCSI) {
+ p_hwfn->hw_info.personality != QED_PCI_ISCSI &&
+ p_hwfn->hw_info.personality != QED_PCI_NVMETCP) {
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"Skipping iSCSI TLVs for non-iSCSI function\n");
tlv_group &= ~QED_MFW_TLV_ISCSI;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.c b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.c
new file mode 100644
index 000000000000..f19128c8d9cc
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.c
@@ -0,0 +1,829 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* Copyright 2021 Marvell. All rights reserved. */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/qed/qed_nvmetcp_if.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_nvmetcp.h"
+#include "qed_ll2.h"
+#include "qed_mcp.h"
+#include "qed_sp.h"
+#include "qed_reg_addr.h"
+#include "qed_nvmetcp_fw_funcs.h"
+
+static int qed_nvmetcp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
+ u16 echo, union event_ring_data *data,
+ u8 fw_return_code)
+{
+ if (p_hwfn->p_nvmetcp_info->event_cb) {
+ struct qed_nvmetcp_info *p_nvmetcp = p_hwfn->p_nvmetcp_info;
+
+ return p_nvmetcp->event_cb(p_nvmetcp->event_context,
+ fw_event_code, data);
+ } else {
+ DP_NOTICE(p_hwfn, "nvmetcp async completion is not set\n");
+
+ return -EINVAL;
+ }
+}
+
+static int qed_sp_nvmetcp_func_start(struct qed_hwfn *p_hwfn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr,
+ void *event_context,
+ nvmetcp_event_cb_t async_event_cb)
+{
+ struct nvmetcp_init_ramrod_params *p_ramrod = NULL;
+ struct qed_nvmetcp_pf_params *p_params = NULL;
+ struct scsi_init_func_queues *p_queue = NULL;
+ struct nvmetcp_spe_func_init *p_init = NULL;
+ struct qed_sp_init_data init_data = {};
+ struct qed_spq_entry *p_ent = NULL;
+ int rc = 0;
+ u16 val;
+ u8 i;
+
+ /* Get SPQ entry */
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ NVMETCP_RAMROD_CMD_ID_INIT_FUNC,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.nvmetcp_init;
+ p_init = &p_ramrod->nvmetcp_init_spe;
+ p_params = &p_hwfn->pf_params.nvmetcp_pf_params;
+ p_queue = &p_init->q_params;
+ p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
+ p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
+ p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
+ p_init->ll2_rx_queue_id = RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) +
+ p_params->ll2_ooo_queue_id;
+ SET_FIELD(p_init->flags, NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE, 1);
+ p_init->func_params.log_page_size = ilog2(PAGE_SIZE);
+ p_init->func_params.num_tasks = cpu_to_le16(p_params->num_tasks);
+ p_init->debug_flags = p_params->debug_mode;
+ DMA_REGPAIR_LE(p_queue->glbl_q_params_addr,
+ p_params->glbl_q_params_addr);
+ p_queue->cq_num_entries = cpu_to_le16(QED_NVMETCP_FW_CQ_SIZE);
+ p_queue->num_queues = p_params->num_queues;
+ val = RESC_START(p_hwfn, QED_CMDQS_CQS);
+ p_queue->queue_relative_offset = cpu_to_le16((u16)val);
+ p_queue->cq_sb_pi = p_params->gl_rq_pi;
+
+ for (i = 0; i < p_params->num_queues; i++) {
+ val = qed_get_igu_sb_id(p_hwfn, i);
+ p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val);
+ }
+
+ SET_FIELD(p_queue->q_validity,
+ SCSI_INIT_FUNC_QUEUES_CMD_VALID, 0);
+ p_queue->cmdq_num_entries = 0;
+ p_queue->bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);
+ p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(QED_TCP_TWO_MSL_TIMER);
+ p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(QED_TCP_SWS_TIMER);
+ p_init->half_way_close_timeout = cpu_to_le16(QED_TCP_HALF_WAY_CLOSE_TIMEOUT);
+ p_ramrod->tcp_init.max_fin_rt = QED_TCP_MAX_FIN_RT;
+ SET_FIELD(p_ramrod->nvmetcp_init_spe.params,
+ NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT, QED_TCP_MAX_FIN_RT);
+ p_hwfn->p_nvmetcp_info->event_context = event_context;
+ p_hwfn->p_nvmetcp_info->event_cb = async_event_cb;
+ qed_spq_register_async_cb(p_hwfn, PROTOCOLID_TCP_ULP,
+ qed_nvmetcp_async_event);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_nvmetcp_func_stop(struct qed_hwfn *p_hwfn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ NVMETCP_RAMROD_CMD_ID_DESTROY_FUNC,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_TCP_ULP);
+
+ return rc;
+}
+
+static int qed_fill_nvmetcp_dev_info(struct qed_dev *cdev,
+ struct qed_dev_nvmetcp_info *info)
+{
+ struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev);
+ int rc;
+
+ memset(info, 0, sizeof(*info));
+ rc = qed_fill_dev_info(cdev, &info->common);
+ info->port_id = MFW_PORT(hwfn);
+ info->num_cqs = FEAT_NUM(hwfn, QED_NVMETCP_CQ);
+
+ return rc;
+}
+
+static void qed_register_nvmetcp_ops(struct qed_dev *cdev,
+ struct qed_nvmetcp_cb_ops *ops,
+ void *cookie)
+{
+ cdev->protocol_ops.nvmetcp = ops;
+ cdev->ops_cookie = cookie;
+}
+
+static int qed_nvmetcp_stop(struct qed_dev *cdev)
+{
+ int rc;
+
+ if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
+ DP_NOTICE(cdev, "nvmetcp already stopped\n");
+
+ return 0;
+ }
+
+ if (!hash_empty(cdev->connections)) {
+ DP_NOTICE(cdev,
+ "Can't stop nvmetcp - not all connections were returned\n");
+
+ return -EINVAL;
+ }
+
+ /* Stop the nvmetcp */
+ rc = qed_sp_nvmetcp_func_stop(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK,
+ NULL);
+ cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
+
+ return rc;
+}
+
+static int qed_nvmetcp_start(struct qed_dev *cdev,
+ struct qed_nvmetcp_tid *tasks,
+ void *event_context,
+ nvmetcp_event_cb_t async_event_cb)
+{
+ struct qed_tid_mem *tid_info;
+ int rc;
+
+ if (cdev->flags & QED_FLAG_STORAGE_STARTED) {
+ DP_NOTICE(cdev, "nvmetcp already started;\n");
+
+ return 0;
+ }
+
+ rc = qed_sp_nvmetcp_func_start(QED_AFFIN_HWFN(cdev),
+ QED_SPQ_MODE_EBLOCK, NULL,
+ event_context, async_event_cb);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to start nvmetcp\n");
+
+ return rc;
+ }
+
+ cdev->flags |= QED_FLAG_STORAGE_STARTED;
+ hash_init(cdev->connections);
+
+ if (!tasks)
+ return 0;
+
+ tid_info = kzalloc(sizeof(*tid_info), GFP_KERNEL);
+ if (!tid_info) {
+ qed_nvmetcp_stop(cdev);
+
+ return -ENOMEM;
+ }
+
+ rc = qed_cxt_get_tid_mem_info(QED_AFFIN_HWFN(cdev), tid_info);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to gather task information\n");
+ qed_nvmetcp_stop(cdev);
+ kfree(tid_info);
+
+ return rc;
+ }
+
+ /* Fill task information */
+ tasks->size = tid_info->tid_size;
+ tasks->num_tids_per_block = tid_info->num_tids_per_block;
+ memcpy(tasks->blocks, tid_info->blocks,
+ MAX_TID_BLOCKS_NVMETCP * sizeof(u8 *));
+ kfree(tid_info);
+
+ return 0;
+}
+
+static struct qed_hash_nvmetcp_con *qed_nvmetcp_get_hash(struct qed_dev *cdev,
+ u32 handle)
+{
+ struct qed_hash_nvmetcp_con *hash_con = NULL;
+
+ if (!(cdev->flags & QED_FLAG_STORAGE_STARTED))
+ return NULL;
+
+ hash_for_each_possible(cdev->connections, hash_con, node, handle) {
+ if (hash_con->con->icid == handle)
+ break;
+ }
+
+ if (!hash_con || hash_con->con->icid != handle)
+ return NULL;
+
+ return hash_con;
+}
+
+static int qed_sp_nvmetcp_conn_offload(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct nvmetcp_spe_conn_offload *p_ramrod = NULL;
+ struct tcp_offload_params_opt2 *p_tcp = NULL;
+ struct qed_sp_init_data init_data = { 0 };
+ struct qed_spq_entry *p_ent = NULL;
+ dma_addr_t r2tq_pbl_addr;
+ dma_addr_t xhq_pbl_addr;
+ dma_addr_t uhq_pbl_addr;
+ u16 physical_q;
+ int rc = 0;
+ u8 i;
+
+ /* Get SPQ entry */
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ NVMETCP_RAMROD_CMD_ID_OFFLOAD_CONN,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.nvmetcp_conn_offload;
+
+ /* Transmission PQ is the first of the PF */
+ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+ p_conn->physical_q0 = cpu_to_le16(physical_q);
+ p_ramrod->nvmetcp.physical_q0 = cpu_to_le16(physical_q);
+
+ /* nvmetcp Pure-ACK PQ */
+ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
+ p_conn->physical_q1 = cpu_to_le16(physical_q);
+ p_ramrod->nvmetcp.physical_q1 = cpu_to_le16(physical_q);
+ p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
+ DMA_REGPAIR_LE(p_ramrod->nvmetcp.sq_pbl_addr, p_conn->sq_pbl_addr);
+ r2tq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->r2tq);
+ DMA_REGPAIR_LE(p_ramrod->nvmetcp.r2tq_pbl_addr, r2tq_pbl_addr);
+ xhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->xhq);
+ DMA_REGPAIR_LE(p_ramrod->nvmetcp.xhq_pbl_addr, xhq_pbl_addr);
+ uhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->uhq);
+ DMA_REGPAIR_LE(p_ramrod->nvmetcp.uhq_pbl_addr, uhq_pbl_addr);
+ p_ramrod->nvmetcp.flags = p_conn->offl_flags;
+ p_ramrod->nvmetcp.default_cq = p_conn->default_cq;
+ p_ramrod->nvmetcp.initial_ack = 0;
+ DMA_REGPAIR_LE(p_ramrod->nvmetcp.nvmetcp.cccid_itid_table_addr,
+ p_conn->nvmetcp_cccid_itid_table_addr);
+ p_ramrod->nvmetcp.nvmetcp.cccid_max_range =
+ cpu_to_le16(p_conn->nvmetcp_cccid_max_range);
+ p_tcp = &p_ramrod->tcp;
+ qed_set_fw_mac_addr(&p_tcp->remote_mac_addr_hi,
+ &p_tcp->remote_mac_addr_mid,
+ &p_tcp->remote_mac_addr_lo, p_conn->remote_mac);
+ qed_set_fw_mac_addr(&p_tcp->local_mac_addr_hi,
+ &p_tcp->local_mac_addr_mid,
+ &p_tcp->local_mac_addr_lo, p_conn->local_mac);
+ p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id);
+ p_tcp->flags = cpu_to_le16(p_conn->tcp_flags);
+ p_tcp->ip_version = p_conn->ip_version;
+ if (p_tcp->ip_version == TCP_IPV6) {
+ for (i = 0; i < 4; i++) {
+ p_tcp->remote_ip[i] = cpu_to_le32(p_conn->remote_ip[i]);
+ p_tcp->local_ip[i] = cpu_to_le32(p_conn->local_ip[i]);
+ }
+ } else {
+ p_tcp->remote_ip[0] = cpu_to_le32(p_conn->remote_ip[0]);
+ p_tcp->local_ip[0] = cpu_to_le32(p_conn->local_ip[0]);
+ }
+
+ p_tcp->flow_label = cpu_to_le32(p_conn->flow_label);
+ p_tcp->ttl = p_conn->ttl;
+ p_tcp->tos_or_tc = p_conn->tos_or_tc;
+ p_tcp->remote_port = cpu_to_le16(p_conn->remote_port);
+ p_tcp->local_port = cpu_to_le16(p_conn->local_port);
+ p_tcp->mss = cpu_to_le16(p_conn->mss);
+ p_tcp->rcv_wnd_scale = p_conn->rcv_wnd_scale;
+ p_tcp->connect_mode = p_conn->connect_mode;
+ p_tcp->cwnd = cpu_to_le32(p_conn->cwnd);
+ p_tcp->ka_max_probe_cnt = p_conn->ka_max_probe_cnt;
+ p_tcp->ka_timeout = cpu_to_le32(p_conn->ka_timeout);
+ p_tcp->max_rt_time = cpu_to_le32(p_conn->max_rt_time);
+ p_tcp->ka_interval = cpu_to_le32(p_conn->ka_interval);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_nvmetcp_conn_update(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct nvmetcp_conn_update_ramrod_params *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+ u32 dval;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ NVMETCP_RAMROD_CMD_ID_UPDATE_CONN,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.nvmetcp_conn_update;
+ p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
+ p_ramrod->flags = p_conn->update_flag;
+ p_ramrod->max_seq_size = cpu_to_le32(p_conn->max_seq_size);
+ dval = p_conn->max_recv_pdu_length;
+ p_ramrod->max_recv_pdu_length = cpu_to_le32(dval);
+ dval = p_conn->max_send_pdu_length;
+ p_ramrod->max_send_pdu_length = cpu_to_le32(dval);
+ p_ramrod->first_seq_length = cpu_to_le32(p_conn->first_seq_length);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_nvmetcp_conn_terminate(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct nvmetcp_spe_conn_termination *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ NVMETCP_RAMROD_CMD_ID_TERMINATION_CONN,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.nvmetcp_conn_terminate;
+ p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
+ p_ramrod->abortive = p_conn->abortive_dsconnect;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_nvmetcp_conn_clear_sq(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ NVMETCP_RAMROD_CMD_ID_CLEAR_SQ,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static void __iomem *qed_nvmetcp_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
+{
+ return (u8 __iomem *)p_hwfn->doorbells +
+ qed_db_addr(cid, DQ_DEMS_LEGACY);
+}
+
+static int qed_nvmetcp_allocate_connection(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn **p_out_conn)
+{
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ };
+ struct qed_nvmetcp_pf_params *p_params = NULL;
+ struct qed_nvmetcp_conn *p_conn = NULL;
+ int rc = 0;
+
+ /* Try finding a free connection that can be used */
+ spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
+ if (!list_empty(&p_hwfn->p_nvmetcp_info->free_list))
+ p_conn = list_first_entry(&p_hwfn->p_nvmetcp_info->free_list,
+ struct qed_nvmetcp_conn, list_entry);
+ if (p_conn) {
+ list_del(&p_conn->list_entry);
+ spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
+ *p_out_conn = p_conn;
+
+ return 0;
+ }
+ spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
+
+ /* Need to allocate a new connection */
+ p_params = &p_hwfn->pf_params.nvmetcp_pf_params;
+ p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL);
+ if (!p_conn)
+ return -ENOMEM;
+
+ params.num_elems = p_params->num_r2tq_pages_in_ring *
+ QED_CHAIN_PAGE_SIZE / sizeof(struct nvmetcp_wqe);
+ params.elem_size = sizeof(struct nvmetcp_wqe);
+ rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->r2tq, &params);
+ if (rc)
+ goto nomem_r2tq;
+
+ params.num_elems = p_params->num_uhq_pages_in_ring *
+ QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_uhqe);
+ params.elem_size = sizeof(struct iscsi_uhqe);
+ rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->uhq, &params);
+ if (rc)
+ goto nomem_uhq;
+
+ params.elem_size = sizeof(struct iscsi_xhqe);
+ rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->xhq, &params);
+ if (rc)
+ goto nomem;
+
+ p_conn->free_on_delete = true;
+ *p_out_conn = p_conn;
+
+ return 0;
+
+nomem:
+ qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
+nomem_uhq:
+ qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
+nomem_r2tq:
+ kfree(p_conn);
+
+ return -ENOMEM;
+}
+
+static int qed_nvmetcp_acquire_connection(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn **p_out_conn)
+{
+ struct qed_nvmetcp_conn *p_conn = NULL;
+ int rc = 0;
+ u32 icid;
+
+ spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_TCP_ULP, &icid);
+ spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
+
+ if (rc)
+ return rc;
+
+ rc = qed_nvmetcp_allocate_connection(p_hwfn, &p_conn);
+ if (rc) {
+ spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
+ qed_cxt_release_cid(p_hwfn, icid);
+ spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
+
+ return rc;
+ }
+
+ p_conn->icid = icid;
+ p_conn->conn_id = (u16)icid;
+ p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid;
+ *p_out_conn = p_conn;
+
+ return rc;
+}
+
+static void qed_nvmetcp_release_connection(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn *p_conn)
+{
+ spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
+ list_add_tail(&p_conn->list_entry, &p_hwfn->p_nvmetcp_info->free_list);
+ qed_cxt_release_cid(p_hwfn, p_conn->icid);
+ spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
+}
+
+static void qed_nvmetcp_free_connection(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn *p_conn)
+{
+ qed_chain_free(p_hwfn->cdev, &p_conn->xhq);
+ qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
+ qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
+ kfree(p_conn);
+}
+
+int qed_nvmetcp_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_nvmetcp_info *p_nvmetcp_info;
+
+ p_nvmetcp_info = kzalloc(sizeof(*p_nvmetcp_info), GFP_KERNEL);
+ if (!p_nvmetcp_info)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&p_nvmetcp_info->free_list);
+ p_hwfn->p_nvmetcp_info = p_nvmetcp_info;
+
+ return 0;
+}
+
+void qed_nvmetcp_setup(struct qed_hwfn *p_hwfn)
+{
+ spin_lock_init(&p_hwfn->p_nvmetcp_info->lock);
+}
+
+void qed_nvmetcp_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_nvmetcp_conn *p_conn = NULL;
+
+ if (!p_hwfn->p_nvmetcp_info)
+ return;
+
+ while (!list_empty(&p_hwfn->p_nvmetcp_info->free_list)) {
+ p_conn = list_first_entry(&p_hwfn->p_nvmetcp_info->free_list,
+ struct qed_nvmetcp_conn, list_entry);
+ if (p_conn) {
+ list_del(&p_conn->list_entry);
+ qed_nvmetcp_free_connection(p_hwfn, p_conn);
+ }
+ }
+
+ kfree(p_hwfn->p_nvmetcp_info);
+ p_hwfn->p_nvmetcp_info = NULL;
+}
+
+static int qed_nvmetcp_acquire_conn(struct qed_dev *cdev,
+ u32 *handle,
+ u32 *fw_cid, void __iomem **p_doorbell)
+{
+ struct qed_hash_nvmetcp_con *hash_con;
+ int rc;
+
+ /* Allocate a hashed connection */
+ hash_con = kzalloc(sizeof(*hash_con), GFP_ATOMIC);
+ if (!hash_con)
+ return -ENOMEM;
+
+ /* Acquire the connection */
+ rc = qed_nvmetcp_acquire_connection(QED_AFFIN_HWFN(cdev),
+ &hash_con->con);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to acquire Connection\n");
+ kfree(hash_con);
+
+ return rc;
+ }
+
+ /* Add the connection to the hash table */
+ *handle = hash_con->con->icid;
+ *fw_cid = hash_con->con->fw_cid;
+ hash_add(cdev->connections, &hash_con->node, *handle);
+ if (p_doorbell)
+ *p_doorbell = qed_nvmetcp_get_db_addr(QED_AFFIN_HWFN(cdev),
+ *handle);
+
+ return 0;
+}
+
+static int qed_nvmetcp_release_conn(struct qed_dev *cdev, u32 handle)
+{
+ struct qed_hash_nvmetcp_con *hash_con;
+
+ hash_con = qed_nvmetcp_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+
+ return -EINVAL;
+ }
+
+ hlist_del(&hash_con->node);
+ qed_nvmetcp_release_connection(QED_AFFIN_HWFN(cdev), hash_con->con);
+ kfree(hash_con);
+
+ return 0;
+}
+
+static int qed_nvmetcp_offload_conn(struct qed_dev *cdev, u32 handle,
+ struct qed_nvmetcp_params_offload *conn_info)
+{
+ struct qed_hash_nvmetcp_con *hash_con;
+ struct qed_nvmetcp_conn *con;
+
+ hash_con = qed_nvmetcp_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+
+ return -EINVAL;
+ }
+
+ /* Update the connection with information from the params */
+ con = hash_con->con;
+
+ /* FW initializations */
+ con->layer_code = NVMETCP_SLOW_PATH_LAYER_CODE;
+ con->sq_pbl_addr = conn_info->sq_pbl_addr;
+ con->nvmetcp_cccid_max_range = conn_info->nvmetcp_cccid_max_range;
+ con->nvmetcp_cccid_itid_table_addr = conn_info->nvmetcp_cccid_itid_table_addr;
+ con->default_cq = conn_info->default_cq;
+ SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE, 0);
+ SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE, 1);
+ SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B, 1);
+
+ /* Networking and TCP stack initializations */
+ ether_addr_copy(con->local_mac, conn_info->src.mac);
+ ether_addr_copy(con->remote_mac, conn_info->dst.mac);
+ memcpy(con->local_ip, conn_info->src.ip, sizeof(con->local_ip));
+ memcpy(con->remote_ip, conn_info->dst.ip, sizeof(con->remote_ip));
+ con->local_port = conn_info->src.port;
+ con->remote_port = conn_info->dst.port;
+ con->vlan_id = conn_info->vlan_id;
+
+ if (conn_info->timestamp_en)
+ SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN, 1);
+
+ if (conn_info->delayed_ack_en)
+ SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN, 1);
+
+ if (conn_info->tcp_keep_alive_en)
+ SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_KA_EN, 1);
+
+ if (conn_info->ecn_en)
+ SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_ECN_EN, 1);
+
+ con->ip_version = conn_info->ip_version;
+ con->flow_label = QED_TCP_FLOW_LABEL;
+ con->ka_max_probe_cnt = conn_info->ka_max_probe_cnt;
+ con->ka_timeout = conn_info->ka_timeout;
+ con->ka_interval = conn_info->ka_interval;
+ con->max_rt_time = conn_info->max_rt_time;
+ con->ttl = conn_info->ttl;
+ con->tos_or_tc = conn_info->tos_or_tc;
+ con->mss = conn_info->mss;
+ con->cwnd = conn_info->cwnd;
+ con->rcv_wnd_scale = conn_info->rcv_wnd_scale;
+ con->connect_mode = 0;
+
+ return qed_sp_nvmetcp_conn_offload(QED_AFFIN_HWFN(cdev), con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_nvmetcp_update_conn(struct qed_dev *cdev,
+ u32 handle,
+ struct qed_nvmetcp_params_update *conn_info)
+{
+ struct qed_hash_nvmetcp_con *hash_con;
+ struct qed_nvmetcp_conn *con;
+
+ hash_con = qed_nvmetcp_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+
+ return -EINVAL;
+ }
+
+ /* Update the connection with information from the params */
+ con = hash_con->con;
+ SET_FIELD(con->update_flag,
+ ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T, 0);
+ SET_FIELD(con->update_flag,
+ ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA, 1);
+ if (conn_info->hdr_digest_en)
+ SET_FIELD(con->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN, 1);
+
+ if (conn_info->data_digest_en)
+ SET_FIELD(con->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN, 1);
+
+ /* Placeholder - initialize pfv, cpda, hpda */
+
+ con->max_seq_size = conn_info->max_io_size;
+ con->max_recv_pdu_length = conn_info->max_recv_pdu_length;
+ con->max_send_pdu_length = conn_info->max_send_pdu_length;
+ con->first_seq_length = conn_info->max_io_size;
+
+ return qed_sp_nvmetcp_conn_update(QED_AFFIN_HWFN(cdev), con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_nvmetcp_clear_conn_sq(struct qed_dev *cdev, u32 handle)
+{
+ struct qed_hash_nvmetcp_con *hash_con;
+
+ hash_con = qed_nvmetcp_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+
+ return -EINVAL;
+ }
+
+ return qed_sp_nvmetcp_conn_clear_sq(QED_AFFIN_HWFN(cdev), hash_con->con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_nvmetcp_destroy_conn(struct qed_dev *cdev,
+ u32 handle, u8 abrt_conn)
+{
+ struct qed_hash_nvmetcp_con *hash_con;
+
+ hash_con = qed_nvmetcp_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+
+ return -EINVAL;
+ }
+
+ hash_con->con->abortive_dsconnect = abrt_conn;
+
+ return qed_sp_nvmetcp_conn_terminate(QED_AFFIN_HWFN(cdev), hash_con->con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static const struct qed_nvmetcp_ops qed_nvmetcp_ops_pass = {
+ .common = &qed_common_ops_pass,
+ .ll2 = &qed_ll2_ops_pass,
+ .fill_dev_info = &qed_fill_nvmetcp_dev_info,
+ .register_ops = &qed_register_nvmetcp_ops,
+ .start = &qed_nvmetcp_start,
+ .stop = &qed_nvmetcp_stop,
+ .acquire_conn = &qed_nvmetcp_acquire_conn,
+ .release_conn = &qed_nvmetcp_release_conn,
+ .offload_conn = &qed_nvmetcp_offload_conn,
+ .update_conn = &qed_nvmetcp_update_conn,
+ .destroy_conn = &qed_nvmetcp_destroy_conn,
+ .clear_sq = &qed_nvmetcp_clear_conn_sq,
+ .add_src_tcp_port_filter = &qed_llh_add_src_tcp_port_filter,
+ .remove_src_tcp_port_filter = &qed_llh_remove_src_tcp_port_filter,
+ .add_dst_tcp_port_filter = &qed_llh_add_dst_tcp_port_filter,
+ .remove_dst_tcp_port_filter = &qed_llh_remove_dst_tcp_port_filter,
+ .clear_all_filters = &qed_llh_clear_all_filters,
+ .init_read_io = &init_nvmetcp_host_read_task,
+ .init_write_io = &init_nvmetcp_host_write_task,
+ .init_icreq_exchange = &init_nvmetcp_init_conn_req_task,
+ .init_task_cleanup = &init_cleanup_task_nvmetcp
+};
+
+const struct qed_nvmetcp_ops *qed_get_nvmetcp_ops(void)
+{
+ return &qed_nvmetcp_ops_pass;
+}
+EXPORT_SYMBOL(qed_get_nvmetcp_ops);
+
+void qed_put_nvmetcp_ops(void)
+{
+}
+EXPORT_SYMBOL(qed_put_nvmetcp_ops);
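qed_get_nvmetcp_ops()/qed_put_nvmetcp_ops() form the module boundary a dependent offload driver binds to. A hedged sketch of the expected bring-up order, assuming a hypothetical qedn-style caller with error handling trimmed; only the ops members come from this patch:

    static const struct qed_nvmetcp_ops *ops;

    static int example_bind_and_start(struct qed_dev *cdev, void *event_ctx,
                                      nvmetcp_event_cb_t event_cb)
    {
            struct qed_dev_nvmetcp_info info;
            int rc;

            ops = qed_get_nvmetcp_ops();
            if (!ops)
                    return -EINVAL;

            rc = ops->fill_dev_info(cdev, &info);
            if (rc)
                    return rc;

            /* NULL tasks: skip TID table retrieval in this sketch */
            return ops->start(cdev, NULL, event_ctx, event_cb);
    }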
diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.h b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.h
new file mode 100644
index 000000000000..e5e9d075bf4f
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright 2021 Marvell. All rights reserved. */
+
+#ifndef _QED_NVMETCP_H
+#define _QED_NVMETCP_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/tcp_common.h>
+#include <linux/qed/qed_nvmetcp_if.h>
+#include <linux/qed/qed_chain.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_mcp.h"
+#include "qed_sp.h"
+
+#define QED_NVMETCP_FW_CQ_SIZE (4 * 1024)
+
+/* tcp parameters */
+#define QED_TCP_FLOW_LABEL 0
+#define QED_TCP_TWO_MSL_TIMER 4000
+#define QED_TCP_HALF_WAY_CLOSE_TIMEOUT 10
+#define QED_TCP_MAX_FIN_RT 2
+#define QED_TCP_SWS_TIMER 5000
+
+struct qed_nvmetcp_info {
+ spinlock_t lock; /* Connection resources. */
+ struct list_head free_list;
+ u16 max_num_outstanding_tasks;
+ void *event_context;
+ nvmetcp_event_cb_t event_cb;
+};
+
+struct qed_hash_nvmetcp_con {
+ struct hlist_node node;
+ struct qed_nvmetcp_conn *con;
+};
+
+struct qed_nvmetcp_conn {
+ struct list_head list_entry;
+ bool free_on_delete;
+ u16 conn_id;
+ u32 icid;
+ u32 fw_cid;
+ u8 layer_code;
+ u8 offl_flags;
+ u8 connect_mode;
+ dma_addr_t sq_pbl_addr;
+ struct qed_chain r2tq;
+ struct qed_chain xhq;
+ struct qed_chain uhq;
+ u8 local_mac[6];
+ u8 remote_mac[6];
+ u8 ip_version;
+ u8 ka_max_probe_cnt;
+ u16 vlan_id;
+ u16 tcp_flags;
+ u32 remote_ip[4];
+ u32 local_ip[4];
+ u32 flow_label;
+ u32 ka_timeout;
+ u32 ka_interval;
+ u32 max_rt_time;
+ u8 ttl;
+ u8 tos_or_tc;
+ u16 remote_port;
+ u16 local_port;
+ u16 mss;
+ u8 rcv_wnd_scale;
+ u32 rcv_wnd;
+ u32 cwnd;
+ u8 update_flag;
+ u8 default_cq;
+ u8 abortive_dsconnect;
+ u32 max_seq_size;
+ u32 max_recv_pdu_length;
+ u32 max_send_pdu_length;
+ u32 first_seq_length;
+ u16 physical_q0;
+ u16 physical_q1;
+ u16 nvmetcp_cccid_max_range;
+ dma_addr_t nvmetcp_cccid_itid_table_addr;
+};
+
+#if IS_ENABLED(CONFIG_QED_NVMETCP)
+int qed_nvmetcp_alloc(struct qed_hwfn *p_hwfn);
+void qed_nvmetcp_setup(struct qed_hwfn *p_hwfn);
+void qed_nvmetcp_free(struct qed_hwfn *p_hwfn);
+
+#else /* IS_ENABLED(CONFIG_QED_NVMETCP) */
+static inline int qed_nvmetcp_alloc(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline void qed_nvmetcp_setup(struct qed_hwfn *p_hwfn) {}
+static inline void qed_nvmetcp_free(struct qed_hwfn *p_hwfn) {}
+
+#endif /* IS_ENABLED(CONFIG_QED_NVMETCP) */
+
+#endif
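A sketch of the per-connection lifecycle the structures above support, assuming the ops table from qed_nvmetcp.c; population of the offload/update parameter structs and all error handling are elided, and every name outside the ops members is hypothetical:

    static void example_conn_lifecycle(struct qed_dev *cdev,
                                       const struct qed_nvmetcp_ops *ops,
                                       struct qed_nvmetcp_params_offload *offl,
                                       struct qed_nvmetcp_params_update *upd)
    {
            void __iomem *db;
            u32 handle, fw_cid;

            ops->acquire_conn(cdev, &handle, &fw_cid, &db); /* ICID + doorbell */
            ops->offload_conn(cdev, handle, offl);  /* TCP offload ramrod */
            ops->update_conn(cdev, handle, upd);    /* after ICReq/ICResp */
            /* ... submit SQEs and ring the doorbell at 'db' ... */
            ops->destroy_conn(cdev, handle, 0 /* graceful */);
            ops->release_conn(cdev, handle);
    }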
diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c
new file mode 100644
index 000000000000..c1dd71d19f3f
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* Copyright 2021 Marvell. All rights reserved. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/storage_common.h>
+#include <linux/qed/nvmetcp_common.h>
+#include <linux/qed/qed_nvmetcp_if.h>
+#include "qed_nvmetcp_fw_funcs.h"
+
+#define NVMETCP_NUM_SGES_IN_CACHE 0x4
+
+bool nvmetcp_is_slow_sgl(u16 num_sges, bool small_mid_sge)
+{
+ return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge);
+}
+
+void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params,
+ struct scsi_cached_sges *ctx_data_desc,
+ struct storage_sgl_task_params *sgl_params)
+{
+ u8 num_sges_to_init = (u8)(sgl_params->num_sges > NVMETCP_NUM_SGES_IN_CACHE ?
+ NVMETCP_NUM_SGES_IN_CACHE : sgl_params->num_sges);
+ u8 sge_index;
+
+ /* sgl params */
+ ctx_sgl_params->sgl_addr.lo = cpu_to_le32(sgl_params->sgl_phys_addr.lo);
+ ctx_sgl_params->sgl_addr.hi = cpu_to_le32(sgl_params->sgl_phys_addr.hi);
+ ctx_sgl_params->sgl_total_length = cpu_to_le32(sgl_params->total_buffer_size);
+ ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_params->num_sges);
+
+ for (sge_index = 0; sge_index < num_sges_to_init; sge_index++) {
+ ctx_data_desc->sge[sge_index].sge_addr.lo =
+ cpu_to_le32(sgl_params->sgl[sge_index].sge_addr.lo);
+ ctx_data_desc->sge[sge_index].sge_addr.hi =
+ cpu_to_le32(sgl_params->sgl[sge_index].sge_addr.hi);
+ ctx_data_desc->sge[sge_index].sge_len =
+ cpu_to_le32(sgl_params->sgl[sge_index].sge_len);
+ }
+}
+
+static inline u32 calc_rw_task_size(struct nvmetcp_task_params *task_params,
+ enum nvmetcp_task_type task_type)
+{
+ u32 io_size;
+
+ if (task_type == NVMETCP_TASK_TYPE_HOST_WRITE)
+ io_size = task_params->tx_io_size;
+ else
+ io_size = task_params->rx_io_size;
+
+ if (unlikely(!io_size))
+ return 0;
+
+ return io_size;
+}
+
+static inline void init_sqe(struct nvmetcp_task_params *task_params,
+ struct storage_sgl_task_params *sgl_task_params,
+ enum nvmetcp_task_type task_type)
+{
+ if (!task_params->sqe)
+ return;
+
+ memset(task_params->sqe, 0, sizeof(*task_params->sqe));
+ task_params->sqe->task_id = cpu_to_le16(task_params->itid);
+
+ switch (task_type) {
+ case NVMETCP_TASK_TYPE_HOST_WRITE: {
+ u32 buf_size = 0;
+ u32 num_sges = 0;
+
+ SET_FIELD(task_params->sqe->contlen_cdbsize,
+ NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD, 1);
+ SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
+ NVMETCP_WQE_TYPE_NORMAL);
+ if (task_params->tx_io_size) {
+ if (task_params->send_write_incapsule)
+ buf_size = calc_rw_task_size(task_params, task_type);
+
+ if (nvmetcp_is_slow_sgl(sgl_task_params->num_sges,
+ sgl_task_params->small_mid_sge))
+ num_sges = NVMETCP_WQE_NUM_SGES_SLOWIO;
+ else
+ num_sges = min((u16)sgl_task_params->num_sges,
+ (u16)SCSI_NUM_SGES_SLOW_SGL_THR);
+ }
+ SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_NUM_SGES, num_sges);
+ SET_FIELD(task_params->sqe->contlen_cdbsize, NVMETCP_WQE_CONT_LEN, buf_size);
+ } break;
+
+ case NVMETCP_TASK_TYPE_HOST_READ: {
+ SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
+ NVMETCP_WQE_TYPE_NORMAL);
+ SET_FIELD(task_params->sqe->contlen_cdbsize,
+ NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD, 1);
+ } break;
+
+ case NVMETCP_TASK_TYPE_INIT_CONN_REQUEST: {
+ SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
+ NVMETCP_WQE_TYPE_MIDDLE_PATH);
+
+ if (task_params->tx_io_size) {
+ SET_FIELD(task_params->sqe->contlen_cdbsize, NVMETCP_WQE_CONT_LEN,
+ task_params->tx_io_size);
+ SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_NUM_SGES,
+ min((u16)sgl_task_params->num_sges,
+ (u16)SCSI_NUM_SGES_SLOW_SGL_THR));
+ }
+ } break;
+
+ case NVMETCP_TASK_TYPE_CLEANUP:
+ SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
+ NVMETCP_WQE_TYPE_TASK_CLEANUP);
+ break;
+
+ default:
+ break;
+ }
+}
+
+/* The following function initializes the NVMeTCP task params */
+static inline void
+init_nvmetcp_task_params(struct e5_nvmetcp_task_context *context,
+ struct nvmetcp_task_params *task_params,
+ enum nvmetcp_task_type task_type)
+{
+ context->ystorm_st_context.state.cccid = task_params->host_cccid;
+ SET_FIELD(context->ustorm_st_context.error_flags, USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP, 1);
+ context->ustorm_st_context.nvme_tcp_opaque_lo = cpu_to_le32(task_params->opq.lo);
+ context->ustorm_st_context.nvme_tcp_opaque_hi = cpu_to_le32(task_params->opq.hi);
+}
+
+/* The following function initializes the task context with default values */
+static inline void
+init_default_nvmetcp_task(struct nvmetcp_task_params *task_params,
+ void *pdu_header, void *nvme_cmd,
+ enum nvmetcp_task_type task_type)
+{
+ struct e5_nvmetcp_task_context *context = task_params->context;
+ const u8 val_byte = context->mstorm_ag_context.cdu_validation;
+ u8 dw_index;
+
+ memset(context, 0, sizeof(*context));
+	init_nvmetcp_task_params(context, task_params, task_type);
+
+	/* Byte-swapping requirement of the current FW: cpu_to_le32(__swab32(x))
+	 * is equivalent to cpu_to_be32(x) on any host, i.e. the header dwords
+	 * are stored big-endian. This will be removed in future FW versions.
+	 */
+ if (task_type == NVMETCP_TASK_TYPE_HOST_WRITE ||
+ task_type == NVMETCP_TASK_TYPE_HOST_READ) {
+ for (dw_index = 0;
+ dw_index < QED_NVMETCP_CMN_HDR_SIZE / sizeof(u32);
+ dw_index++)
+ context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] =
+ cpu_to_le32(__swab32(((u32 *)pdu_header)[dw_index]));
+
+ for (dw_index = QED_NVMETCP_CMN_HDR_SIZE / sizeof(u32);
+ dw_index < QED_NVMETCP_CMD_HDR_SIZE / sizeof(u32);
+ dw_index++)
+ context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] =
+ cpu_to_le32(__swab32(((u32 *)nvme_cmd)[dw_index - 2]));
+ } else {
+ for (dw_index = 0;
+ dw_index < QED_NVMETCP_NON_IO_HDR_SIZE / sizeof(u32);
+ dw_index++)
+ context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] =
+ cpu_to_le32(__swab32(((u32 *)pdu_header)[dw_index]));
+ }
+
+ /* M-Storm Context: */
+ context->mstorm_ag_context.cdu_validation = val_byte;
+ context->mstorm_st_context.task_type = (u8)(task_type);
+ context->mstorm_ag_context.task_cid = cpu_to_le16(task_params->conn_icid);
+
+ /* Ustorm Context: */
+ SET_FIELD(context->ustorm_ag_context.flags1, E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV, 1);
+ context->ustorm_st_context.task_type = (u8)(task_type);
+ context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
+ context->ustorm_ag_context.icid = cpu_to_le16(task_params->conn_icid);
+}
+
+/* The following function initializes the U-Storm Task Contexts */
+static inline void
+init_ustorm_task_contexts(struct ustorm_nvmetcp_task_st_ctx *ustorm_st_context,
+ struct e5_ustorm_nvmetcp_task_ag_ctx *ustorm_ag_context,
+ u32 remaining_recv_len,
+ u32 expected_data_transfer_len, u8 num_sges,
+ bool tx_dif_conn_err_en)
+{
+	/* Remaining data to be received in bytes. Used in validations */
+ ustorm_st_context->rem_rcv_len = cpu_to_le32(remaining_recv_len);
+ ustorm_ag_context->exp_data_acked = cpu_to_le32(expected_data_transfer_len);
+ ustorm_st_context->exp_data_transfer_len = cpu_to_le32(expected_data_transfer_len);
+ SET_FIELD(ustorm_st_context->reg1_map, REG1_NUM_SGES, num_sges);
+ SET_FIELD(ustorm_ag_context->flags2, E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_EN,
+ tx_dif_conn_err_en ? 1 : 0);
+}
+
+/* The following function initializes Local Completion Contexts: */
+static inline void
+set_local_completion_context(struct e5_nvmetcp_task_context *context)
+{
+ SET_FIELD(context->ystorm_st_context.state.flags,
+ YSTORM_NVMETCP_TASK_STATE_LOCAL_COMP, 1);
+ SET_FIELD(context->ustorm_st_context.flags,
+ USTORM_NVMETCP_TASK_ST_CTX_LOCAL_COMP, 1);
+}
+
+/* Common Fastpath task init function: */
+static inline void
+init_rw_nvmetcp_task(struct nvmetcp_task_params *task_params,
+ enum nvmetcp_task_type task_type,
+ void *pdu_header, void *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params)
+{
+ struct e5_nvmetcp_task_context *context = task_params->context;
+ u32 task_size = calc_rw_task_size(task_params, task_type);
+ bool slow_io = false;
+ u8 num_sges = 0;
+
+ init_default_nvmetcp_task(task_params, pdu_header, nvme_cmd, task_type);
+
+ /* Tx/Rx: */
+ if (task_params->tx_io_size) {
+ /* if data to transmit: */
+ init_scsi_sgl_context(&context->ystorm_st_context.state.sgl_params,
+ &context->ystorm_st_context.state.data_desc,
+ sgl_task_params);
+		slow_io = nvmetcp_is_slow_sgl(sgl_task_params->num_sges,
+					      sgl_task_params->small_mid_sge);
+		if (slow_io) {
+			num_sges = NVMETCP_WQE_NUM_SGES_SLOWIO;
+			SET_FIELD(context->ystorm_st_context.state.flags,
+				  YSTORM_NVMETCP_TASK_STATE_SLOW_IO, 1);
+		} else {
+			num_sges = (u8)min_t(u16, sgl_task_params->num_sges,
+					     SCSI_NUM_SGES_SLOW_SGL_THR);
+		}
+ } else if (task_params->rx_io_size) {
+ /* if data to receive: */
+ init_scsi_sgl_context(&context->mstorm_st_context.sgl_params,
+ &context->mstorm_st_context.data_desc,
+ sgl_task_params);
+		slow_io = nvmetcp_is_slow_sgl(sgl_task_params->num_sges,
+					      sgl_task_params->small_mid_sge);
+		num_sges = slow_io ? NVMETCP_WQE_NUM_SGES_SLOWIO :
+			   (u8)min_t(u16, sgl_task_params->num_sges,
+				     SCSI_NUM_SGES_SLOW_SGL_THR);
+ context->mstorm_st_context.rem_task_size = cpu_to_le32(task_size);
+ }
+
+ /* Ustorm context: */
+ init_ustorm_task_contexts(&context->ustorm_st_context,
+ &context->ustorm_ag_context,
+ /* Remaining Receive length is the Task Size */
+ task_size,
+ /* The size of the transmitted task */
+ task_size,
+ /* num_sges */
+ num_sges,
+ false);
+
+	/* Set exp_data_acked: for an in-capsule write the entire payload is
+	 * carried within the command PDU and is treated as acknowledged up
+	 * front; otherwise data is transmitted only in response to R2Ts.
+	 */
+ if (task_type == NVMETCP_TASK_TYPE_HOST_WRITE) {
+ if (task_params->send_write_incapsule)
+			context->ustorm_ag_context.exp_data_acked =
+				cpu_to_le32(task_size);
+ else
+ context->ustorm_ag_context.exp_data_acked = 0;
+ } else if (task_type == NVMETCP_TASK_TYPE_HOST_READ) {
+ context->ustorm_ag_context.exp_data_acked = 0;
+ }
+
+ context->ustorm_ag_context.exp_cont_len = 0;
+ init_sqe(task_params, sgl_task_params, task_type);
+}
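+
+/* A note on the fast/slow SGL split above (inferred from the constants, not
+ * from FW documentation): short SGLs, up to SCSI_NUM_SGES_SLOW_SGL_THR
+ * entries with no small middle SGE, advertise their real SGE count in the
+ * WQE; anything longer, or containing a small middle SGE, is flagged as
+ * slow I/O and advertises the sentinel NVMETCP_WQE_NUM_SGES_SLOWIO instead.
+ */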
+
+static void
+init_common_initiator_read_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params)
+{
+ init_rw_nvmetcp_task(task_params, NVMETCP_TASK_TYPE_HOST_READ,
+ cmd_pdu_header, nvme_cmd, sgl_task_params);
+}
+
+void init_nvmetcp_host_read_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params)
+{
+ init_common_initiator_read_task(task_params, (void *)cmd_pdu_header,
+ (void *)nvme_cmd, sgl_task_params);
+}
+
+static void
+init_common_initiator_write_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params)
+{
+ init_rw_nvmetcp_task(task_params, NVMETCP_TASK_TYPE_HOST_WRITE,
+ cmd_pdu_header, nvme_cmd, sgl_task_params);
+}
+
+void init_nvmetcp_host_write_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params)
+{
+ init_common_initiator_write_task(task_params, (void *)cmd_pdu_header,
+ (void *)nvme_cmd, sgl_task_params);
+}
+
+static void
+init_common_login_request_task(struct nvmetcp_task_params *task_params,
+ void *login_req_pdu_header,
+ struct storage_sgl_task_params *tx_sgl_task_params,
+ struct storage_sgl_task_params *rx_sgl_task_params)
+{
+ struct e5_nvmetcp_task_context *context = task_params->context;
+
+ init_default_nvmetcp_task(task_params, (void *)login_req_pdu_header, NULL,
+ NVMETCP_TASK_TYPE_INIT_CONN_REQUEST);
+
+ /* Ustorm Context: */
+ init_ustorm_task_contexts(&context->ustorm_st_context,
+ &context->ustorm_ag_context,
+
+ /* Remaining Receive length is the Task Size */
+ task_params->rx_io_size ?
+ rx_sgl_task_params->total_buffer_size : 0,
+
+ /* The size of the transmitted task */
+ task_params->tx_io_size ?
+ tx_sgl_task_params->total_buffer_size : 0,
+ 0, /* num_sges */
+ 0); /* tx_dif_conn_err_en */
+
+ /* SGL context: */
+ if (task_params->tx_io_size)
+ init_scsi_sgl_context(&context->ystorm_st_context.state.sgl_params,
+ &context->ystorm_st_context.state.data_desc,
+ tx_sgl_task_params);
+ if (task_params->rx_io_size)
+ init_scsi_sgl_context(&context->mstorm_st_context.sgl_params,
+ &context->mstorm_st_context.data_desc,
+ rx_sgl_task_params);
+
+ context->mstorm_st_context.rem_task_size =
+ cpu_to_le32(task_params->rx_io_size ?
+ rx_sgl_task_params->total_buffer_size : 0);
+ init_sqe(task_params, tx_sgl_task_params, NVMETCP_TASK_TYPE_INIT_CONN_REQUEST);
+}
+
+/* The following function initializes the Login Request (ICReq) task in Host mode */
+void init_nvmetcp_init_conn_req_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_icreq_pdu *init_conn_req_pdu_hdr,
+ struct storage_sgl_task_params *tx_sgl_task_params,
+ struct storage_sgl_task_params *rx_sgl_task_params)
+{
+ init_common_login_request_task(task_params, init_conn_req_pdu_hdr,
+ tx_sgl_task_params, rx_sgl_task_params);
+}
+
+void init_cleanup_task_nvmetcp(struct nvmetcp_task_params *task_params)
+{
+ init_sqe(task_params, NULL, NVMETCP_TASK_TYPE_CLEANUP);
+}
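+
+/* Illustrative usage sketch (hypothetical offload-driver caller; the qedn_*
+ * names are placeholders, not part of this API):
+ *
+ *	struct nvmetcp_task_params params = {
+ *		.context	= qedn_task->fw_ctx,
+ *		.sqe		= qedn_conn_next_sqe(conn),
+ *		.itid		= qedn_task->itid,
+ *		.conn_icid	= conn->icid,
+ *		.host_cccid	= cccid,
+ *		.cq_rss_number	= conn->cq_id,
+ *		.rx_io_size	= payload_len,
+ *	};
+ *
+ *	init_nvmetcp_host_read_task(&params, cmd_pdu, nvme_cmd, &sgl_params);
+ *	(ring the connection SQ doorbell)
+ *	init_cleanup_task_nvmetcp(&params);	(only on abort/cleanup)
+ */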
diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.h b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.h
new file mode 100644
index 000000000000..1d5ddc217bdb
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright 2021 Marvell. All rights reserved. */
+
+#ifndef _QED_NVMETCP_FW_FUNCS_H
+#define _QED_NVMETCP_FW_FUNCS_H
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/storage_common.h>
+#include <linux/qed/nvmetcp_common.h>
+#include <linux/qed/qed_nvmetcp_if.h>
+
+#if IS_ENABLED(CONFIG_QED_NVMETCP)
+
+void init_nvmetcp_host_read_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params);
+void init_nvmetcp_host_write_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params);
+void init_nvmetcp_init_conn_req_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_icreq_pdu *init_conn_req_pdu_hdr,
+ struct storage_sgl_task_params *tx_sgl_task_params,
+ struct storage_sgl_task_params *rx_sgl_task_params);
+void init_cleanup_task_nvmetcp(struct nvmetcp_task_params *task_params);
+
+#else /* IS_ENABLED(CONFIG_QED_NVMETCP) */
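+
+/* No stubs are provided for the disabled case; the intended consumers of
+ * these helpers are expected to be built only when CONFIG_QED_NVMETCP is
+ * enabled.
+ */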
+
+#endif /* IS_ENABLED(CONFIG_QED_NVMETCP) */
+
+#endif /* _QED_NVMETCP_FW_FUNCS_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c
new file mode 100644
index 000000000000..96a2077fd315
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/*
+ * Copyright 2021 Marvell. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/errno.h>
+
+#include <net/tcp.h>
+
+#include <linux/qed/qed_nvmetcp_ip_services_if.h>
+
+#define QED_IP_RESOL_TIMEOUT 4	/* max number of 1-second neighbour polls */
+
+int qed_route_ipv4(struct sockaddr_storage *local_addr,
+ struct sockaddr_storage *remote_addr,
+ struct sockaddr *hardware_address,
+ struct net_device **ndev)
+{
+ struct neighbour *neigh = NULL;
+ __be32 *loc_ip, *rem_ip;
+ struct rtable *rt;
+ int rc = -ENXIO;
+ int retry;
+
+ loc_ip = &((struct sockaddr_in *)local_addr)->sin_addr.s_addr;
+ rem_ip = &((struct sockaddr_in *)remote_addr)->sin_addr.s_addr;
+ *ndev = NULL;
+ rt = ip_route_output(&init_net, *rem_ip, *loc_ip, 0/*tos*/, 0/*oif*/);
+ if (IS_ERR(rt)) {
+ pr_err("lookup route failed\n");
+ rc = PTR_ERR(rt);
+ goto return_err;
+ }
+
+ neigh = dst_neigh_lookup(&rt->dst, rem_ip);
+ if (!neigh) {
+ rc = -ENOMEM;
+ ip_rt_put(rt);
+ goto return_err;
+ }
+
+ *ndev = rt->dst.dev;
+ ip_rt_put(rt);
+
+	/* If not resolved, kick off the state machine towards resolution */
+ if (!(neigh->nud_state & NUD_VALID))
+ neigh_event_send(neigh, NULL);
+
+ /* query neighbor until resolved or timeout */
+ retry = QED_IP_RESOL_TIMEOUT;
+ while (!(neigh->nud_state & NUD_VALID) && retry > 0) {
+ msleep(1000);
+ retry--;
+ }
+
+ if (neigh->nud_state & NUD_VALID) {
+ /* copy resolved MAC address */
+ neigh_ha_snapshot(hardware_address->sa_data, neigh, *ndev);
+ hardware_address->sa_family = (*ndev)->type;
+ rc = 0;
+ }
+
+ neigh_release(neigh);
+ if (!(*loc_ip)) {
+ *loc_ip = inet_select_addr(*ndev, *rem_ip, RT_SCOPE_UNIVERSE);
+ local_addr->ss_family = AF_INET;
+ }
+
+return_err:
+
+ return rc;
+}
+EXPORT_SYMBOL(qed_route_ipv4);
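+
+/* Illustrative usage sketch (hypothetical caller): resolve the egress
+ * netdev and destination MAC before offloading a TCP connection:
+ *
+ *	struct sockaddr_storage loc = {}, rem = {};	(AF_INET addresses)
+ *	struct sockaddr mac;
+ *	struct net_device *ndev;
+ *
+ *	if (!qed_route_ipv4(&loc, &rem, &mac, &ndev))
+ *		ether_addr_copy(remote_mac, mac.sa_data);
+ */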
+
+int qed_route_ipv6(struct sockaddr_storage *local_addr,
+ struct sockaddr_storage *remote_addr,
+ struct sockaddr *hardware_address,
+ struct net_device **ndev)
+{
+ struct neighbour *neigh = NULL;
+ struct dst_entry *dst;
+ struct flowi6 fl6;
+ int rc = -ENXIO;
+ int retry;
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.saddr = ((struct sockaddr_in6 *)local_addr)->sin6_addr;
+ fl6.daddr = ((struct sockaddr_in6 *)remote_addr)->sin6_addr;
+ dst = ip6_route_output(&init_net, NULL, &fl6);
+ if (!dst || dst->error) {
+ if (dst) {
+ dst_release(dst);
+ pr_err("lookup route failed %d\n", dst->error);
+ }
+
+ goto out;
+ }
+
+ neigh = dst_neigh_lookup(dst, &fl6.daddr);
+ if (neigh) {
+ *ndev = ip6_dst_idev(dst)->dev;
+
+		/* If not resolved, kick off the state machine towards resolution */
+ if (!(neigh->nud_state & NUD_VALID))
+ neigh_event_send(neigh, NULL);
+
+ /* query neighbor until resolved or timeout */
+ retry = QED_IP_RESOL_TIMEOUT;
+ while (!(neigh->nud_state & NUD_VALID) && retry > 0) {
+ msleep(1000);
+ retry--;
+ }
+
+ if (neigh->nud_state & NUD_VALID) {
+ neigh_ha_snapshot((u8 *)hardware_address->sa_data,
+ neigh, *ndev);
+ hardware_address->sa_family = (*ndev)->type;
+ rc = 0;
+ }
+
+ neigh_release(neigh);
+
+ if (ipv6_addr_any(&fl6.saddr)) {
+ if (ipv6_dev_get_saddr(dev_net(*ndev), *ndev,
+ &fl6.daddr, 0, &fl6.saddr)) {
+ pr_err("Unable to find source IP address\n");
+ goto out;
+ }
+
+ local_addr->ss_family = AF_INET6;
+ ((struct sockaddr_in6 *)local_addr)->sin6_addr =
+ fl6.saddr;
+ }
+ }
+
+ dst_release(dst);
+
+out:
+
+ return rc;
+}
+EXPORT_SYMBOL(qed_route_ipv6);
+
+void qed_vlan_get_ndev(struct net_device **ndev, u16 *vlan_id)
+{
+ if (is_vlan_dev(*ndev)) {
+ *vlan_id = vlan_dev_vlan_id(*ndev);
+ *ndev = vlan_dev_real_dev(*ndev);
+ }
+}
+EXPORT_SYMBOL(qed_vlan_get_ndev);
+
+struct pci_dev *qed_validate_ndev(struct net_device *ndev)
+{
+ struct pci_dev *pdev = NULL;
+ struct net_device *upper;
+
+ for_each_pci_dev(pdev) {
+		if (pdev->driver && !strcmp(pdev->driver->name, "qede")) {
+ upper = pci_get_drvdata(pdev);
+ if (upper->ifindex == ndev->ifindex)
+ return pdev;
+ }
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(qed_validate_ndev);
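+
+/* Illustrative usage sketch (hypothetical caller): peel a VLAN device back
+ * to the real netdev, then check that it is backed by a qede PCI function:
+ *
+ *	u16 vlan_id = 0;
+ *
+ *	qed_vlan_get_ndev(&ndev, &vlan_id);
+ *	pdev = qed_validate_ndev(ndev);
+ *	if (!pdev)
+ *		return -ENODEV;
+ */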
+
+__be16 qed_get_in_port(struct sockaddr_storage *sa)
+{
+ return sa->ss_family == AF_INET
+ ? ((struct sockaddr_in *)sa)->sin_port
+ : ((struct sockaddr_in6 *)sa)->sin6_port;
+}
+EXPORT_SYMBOL(qed_get_in_port);
+
+int qed_fetch_tcp_port(struct sockaddr_storage local_ip_addr,
+ struct socket **sock, u16 *port)
+{
+ struct sockaddr_storage sa;
+ int rc = 0;
+
+ rc = sock_create(local_ip_addr.ss_family, SOCK_STREAM, IPPROTO_TCP,
+ sock);
+ if (rc) {
+ pr_warn("failed to create socket: %d\n", rc);
+ goto err;
+ }
+
+ (*sock)->sk->sk_allocation = GFP_KERNEL;
+ sk_set_memalloc((*sock)->sk);
+
+ rc = kernel_bind(*sock, (struct sockaddr *)&local_ip_addr,
+ sizeof(local_ip_addr));
+
+ if (rc) {
+ pr_warn("failed to bind socket: %d\n", rc);
+ goto err_sock;
+ }
+
+ rc = kernel_getsockname(*sock, (struct sockaddr *)&sa);
+ if (rc < 0) {
+ pr_warn("getsockname() failed: %d\n", rc);
+ goto err_sock;
+ }
+
+ *port = ntohs(qed_get_in_port(&sa));
+
+ return 0;
+
+err_sock:
+ sock_release(*sock);
+	*sock = NULL;
+err:
+
+ return rc;
+}
+EXPORT_SYMBOL(qed_fetch_tcp_port);
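+
+/* Illustrative usage sketch (hypothetical caller): reserve a local ephemeral
+ * port by binding a placeholder socket, then release it once the offloaded
+ * connection is torn down:
+ *
+ *	struct socket *sock;
+ *	u16 port;
+ *
+ *	rc = qed_fetch_tcp_port(local_addr, &sock, &port);
+ *	if (rc)
+ *		return rc;
+ *	(use "port" as the offloaded connection's source port)
+ *	...
+ *	qed_return_tcp_port(sock);
+ */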
+
+void qed_return_tcp_port(struct socket *sock)
+{
+ if (sock && sock->sk) {
+ tcp_set_state(sock->sk, TCP_CLOSE);
+ sock_release(sock);
+ }
+}
+EXPORT_SYMBOL(qed_return_tcp_port);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
index 88353aa404dc..b8c5641b29a8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -16,7 +16,7 @@
#include "qed_ll2.h"
#include "qed_ooo.h"
#include "qed_cxt.h"
+#include "qed_nvmetcp.h"

static struct qed_ooo_archipelago
*qed_ooo_seek_archipelago(struct qed_hwfn *p_hwfn,
struct qed_ooo_info
@@ -83,7 +83,8 @@ int qed_ooo_alloc(struct qed_hwfn *p_hwfn)
switch (p_hwfn->hw_info.personality) {
case QED_PCI_ISCSI:
- proto = PROTOCOLID_ISCSI;
+ case QED_PCI_NVMETCP:
+ proto = PROTOCOLID_TCP_ULP;
break;
case QED_PCI_ETH_RDMA:
case QED_PCI_ETH_IWARP:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 993f1357b6fc..60ff3222bf55 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -100,6 +100,11 @@ union ramrod_data {
struct iscsi_spe_conn_mac_update iscsi_conn_mac_update;
struct iscsi_spe_conn_termination iscsi_conn_terminate;
+ struct nvmetcp_init_ramrod_params nvmetcp_init;
+ struct nvmetcp_spe_conn_offload nvmetcp_conn_offload;
+ struct nvmetcp_conn_update_ramrod_params nvmetcp_conn_update;
+ struct nvmetcp_spe_conn_termination nvmetcp_conn_terminate;
+
struct vf_start_ramrod_data vf_start;
struct vf_stop_ramrod_data vf_stop;
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index aa71adcf31ee..b4ed54ffef9b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -385,7 +385,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
p_ramrod->personality = PERSONALITY_FCOE;
break;
case QED_PCI_ISCSI:
- p_ramrod->personality = PERSONALITY_ISCSI;
+ case QED_PCI_NVMETCP:
+ p_ramrod->personality = PERSONALITY_TCP_ULP;
break;
case QED_PCI_ETH_ROCE:
case QED_PCI_ETH_IWARP:
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 8e150dd4f899..065e9004598e 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1089,13 +1089,7 @@ static bool qede_rx_xdp(struct qede_dev *edev,
xdp_prepare_buff(&xdp, page_address(bd->data), *data_offset,
*len, false);
- /* Queues always have a full reset currently, so for the time
- * being until there's atomic program replace just mark read
- * side for map helpers.
- */
- rcu_read_lock();
act = bpf_prog_run_xdp(prog, &xdp);
- rcu_read_unlock();
/* Recalculate, as XDP might have changed the headers */
*data_offset = xdp.data - xdp.data_hard_start;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
index 2f6598086d9b..6304514a6f2c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
@@ -247,12 +247,10 @@ static struct qede_rdma_event_work *
qede_rdma_get_free_event_node(struct qede_dev *edev)
{
struct qede_rdma_event_work *event_node = NULL;
- struct list_head *list_node = NULL;
bool found = false;
- list_for_each(list_node, &edev->rdma_info.rdma_event_list) {
- event_node = list_entry(list_node, struct qede_rdma_event_work,
- list);
+ list_for_each_entry(event_node, &edev->rdma_info.rdma_event_list,
+ list) {
if (!work_pending(&event_node->work)) {
found = true;
break;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index d2c190732d3e..0a2f34fc8b24 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -746,7 +746,7 @@ static int qlcnic_83xx_idc_unknown_state(struct qlcnic_adapter *adapter)
}
/**
- * qlcnic_83xx_idc_cold_state
+ * qlcnic_83xx_idc_cold_state_handler
*
* @adapter: adapter structure
*
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
index c4297aea7d15..711609503ba6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -180,7 +180,7 @@ static int qlcnic_83xx_init_non_privileged_vnic(struct qlcnic_adapter *adapter)
}
/**
- * qlcnic_83xx_vnic_opmode
+ * qlcnic_83xx_config_vnic_opmode
*
* @adapter: adapter structure
* Identify virtual NIC operational modes.
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index e1b8490bed0a..4b8bc46f55c2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -460,12 +460,10 @@ int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
int qlcnic_nic_del_mac(struct qlcnic_adapter *adapter, const u8 *addr)
{
struct qlcnic_mac_vlan_list *cur;
- struct list_head *head;
int err = -EINVAL;
/* Delete MAC from the existing list */
- list_for_each(head, &adapter->mac_list) {
- cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ list_for_each_entry(cur, &adapter->mac_list, list) {
if (ether_addr_equal(addr, cur->mac_addr)) {
err = qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
0, QLCNIC_MAC_DEL);
@@ -483,11 +481,9 @@ int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan,
enum qlcnic_mac_type mac_type)
{
struct qlcnic_mac_vlan_list *cur;
- struct list_head *head;
/* look up if already exists */
- list_for_each(head, &adapter->mac_list) {
- cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ list_for_each_entry(cur, &adapter->mac_list, list) {
if (ether_addr_equal(addr, cur->mac_addr) &&
cur->vlan_id == vlan)
return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 601d22495a88..95ecc84dddcd 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -203,7 +203,6 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *,
struct qlcnic_adapter *, u32);
-int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
int qlcnic_82xx_get_board_info(struct qlcnic_adapter *);
int qlcnic_82xx_config_led(struct qlcnic_adapter *, u32, u32);
void qlcnic_82xx_get_func_no(struct qlcnic_adapter *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 918220ad0d53..a4fa507903ee 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -319,10 +319,8 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
static void qlcnic_delete_adapter_mac(struct qlcnic_adapter *adapter)
{
struct qlcnic_mac_vlan_list *cur;
- struct list_head *head;
- list_for_each(head, &adapter->mac_list) {
- cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ list_for_each_entry(cur, &adapter->mac_list, list) {
if (ether_addr_equal_unaligned(adapter->mac_addr, cur->mac_addr)) {
qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
0, QLCNIC_MAC_DEL);
@@ -3344,9 +3342,6 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
do {
msleep(1000);
prev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
-
- if (prev_state == QLCNIC_DEV_QUISCENT)
- continue;
} while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
if (!dev_init_timeo) {