author     Linus Torvalds <torvalds@linux-foundation.org>  2021-02-22 04:23:56 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-02-22 04:23:56 +0300
commit     31caf8b2a847214be856f843e251fc2ed2cd1075 (patch)
tree       245257387cfcae352fae27b1ca824daab55472ba /drivers/crypto/hisilicon
parent     a2b095e0efa7229a1a88602283ba1a8a32004851 (diff)
parent     0de9dc80625b0ca1cb9730c5ed1c5a8cab538369 (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
 "API:
   - Restrict crypto_cipher to internal API users only.

  Algorithms:
   - Add x86 aesni acceleration for cts.
   - Improve x86 aesni acceleration for xts.
   - Remove x86 acceleration of some uncommon algorithms.
   - Remove RIPE-MD, Tiger and Salsa20.
   - Remove tnepres.
   - Add ARM acceleration for BLAKE2s and BLAKE2b.

  Drivers:
   - Add Keem Bay OCS HCU driver.
   - Add Marvell OcteonTX2 CPT PF driver.
   - Remove PicoXcell driver.
   - Remove mediatek driver"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (154 commits)
  hwrng: timeriomem - Use device-managed registration API
  crypto: hisilicon/qm - fix printing format issue
  crypto: hisilicon/qm - do not reset hardware when CE happens
  crypto: hisilicon/qm - update irqflag
  crypto: hisilicon/qm - fix the value of 'QM_SQC_VFT_BASE_MASK_V2'
  crypto: hisilicon/qm - fix request missing error
  crypto: hisilicon/qm - removing driver after reset
  crypto: octeontx2 - fix -Wpointer-bool-conversion warning
  crypto: hisilicon/hpre - enable Elliptic curve cryptography
  crypto: hisilicon - PASID fixed on Kunpeng 930
  crypto: hisilicon/qm - fix use of 'dma_map_single'
  crypto: hisilicon/hpre - tiny fix
  crypto: hisilicon/hpre - adapt the number of clusters
  crypto: cpt - remove casting dma_alloc_coherent
  crypto: keembay-ocs-aes - Fix 'q' assignment during CCM B0 generation
  crypto: xor - Fix typo of optimization
  hwrng: optee - Use device-managed registration API
  crypto: arm64/crc-t10dif - move NEON yield to C code
  crypto: arm64/aes-ce-mac - simplify NEON yield
  crypto: arm64/aes-neonbs - remove NEON yield calls
  ...
Diffstat (limited to 'drivers/crypto/hisilicon')
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre.h       |   8
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_main.c  | 169
-rw-r--r--  drivers/crypto/hisilicon/qm.c              | 193
-rw-r--r--  drivers/crypto/hisilicon/qm.h              |  33
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_main.c   |  42
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_main.c    |  23
6 files changed, 362 insertions, 106 deletions
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index f69252b24671..181c109b19f7 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -14,8 +14,7 @@ enum {
HPRE_CLUSTER0,
HPRE_CLUSTER1,
HPRE_CLUSTER2,
- HPRE_CLUSTER3,
- HPRE_CLUSTERS_NUM,
+ HPRE_CLUSTER3
};
enum hpre_ctrl_dbgfs_file {
@@ -36,7 +35,10 @@ enum hpre_dfx_dbgfs_file {
HPRE_DFX_FILE_NUM
};
-#define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM - 1)
+#define HPRE_CLUSTERS_NUM_V2 (HPRE_CLUSTER3 + 1)
+#define HPRE_CLUSTERS_NUM_V3 1
+#define HPRE_CLUSTERS_NUM_MAX HPRE_CLUSTERS_NUM_V2
+#define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM_MAX - 1)
struct hpre_debugfs_file {
int index;
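
Note: the hpre.h hunk above resizes the debugfs table for the worst-case cluster count instead of assuming the HW v2 layout. A compile-checkable sketch of the arithmetic; HPRE_DEBUG_FILE_NUM is not visible in this hunk, so the value 3 below is an assumption:

enum { HPRE_CLUSTER0, HPRE_CLUSTER1, HPRE_CLUSTER2, HPRE_CLUSTER3 };

#define HPRE_CLUSTERS_NUM_V2  (HPRE_CLUSTER3 + 1)  /* 4 clusters on HW v2 */
#define HPRE_CLUSTERS_NUM_V3  1                    /* 1 cluster on HW v3 */
#define HPRE_CLUSTERS_NUM_MAX HPRE_CLUSTERS_NUM_V2 /* worst case sizes the table */

#define HPRE_DEBUG_FILE_NUM   3 /* assumed value; really defined earlier in hpre.h */
#define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM_MAX - 1)

/* one of the debug files is per-cluster, so it is counted once per cluster
 * and the copy already inside HPRE_DEBUG_FILE_NUM is subtracted back out */
_Static_assert(HPRE_DEBUGFS_FILE_NUM == 6, "2 shared files + 4 cluster files");
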
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index e5c991913f09..e7a2c70eb9cf 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/topology.h>
+#include <linux/uacce.h>
#include "hpre.h"
#define HPRE_QUEUE_NUM_V2 1024
@@ -29,13 +30,14 @@
#define HPRE_BD_ARUSR_CFG 0x301030
#define HPRE_BD_AWUSR_CFG 0x301034
#define HPRE_TYPES_ENB 0x301038
+#define HPRE_RSA_ENB BIT(0)
+#define HPRE_ECC_ENB BIT(1)
#define HPRE_DATA_RUSER_CFG 0x30103c
#define HPRE_DATA_WUSER_CFG 0x301040
#define HPRE_INT_MASK 0x301400
#define HPRE_INT_STATUS 0x301800
#define HPRE_CORE_INT_ENABLE 0
#define HPRE_CORE_INT_DISABLE 0x003fffff
-#define HPRE_RAS_ECC_1BIT_TH 0x30140c
#define HPRE_RDCHN_INI_ST 0x301a00
#define HPRE_CLSTR_BASE 0x302000
#define HPRE_CORE_EN_OFFSET 0x04
@@ -45,7 +47,7 @@
#define HPRE_CORE_IS_SCHD_OFFSET 0x90
#define HPRE_RAS_CE_ENB 0x301410
-#define HPRE_HAC_RAS_CE_ENABLE 0x1
+#define HPRE_HAC_RAS_CE_ENABLE (BIT(0) | BIT(22) | BIT(23))
#define HPRE_RAS_NFE_ENB 0x301414
#define HPRE_HAC_RAS_NFE_ENABLE 0x3ffffe
#define HPRE_RAS_FE_ENB 0x301418
@@ -73,7 +75,8 @@
#define HPRE_QM_AXI_CFG_MASK 0xffff
#define HPRE_QM_VFG_AX_MASK 0xff
#define HPRE_BD_USR_MASK 0x3
-#define HPRE_CLUSTER_CORE_MASK 0xf
+#define HPRE_CLUSTER_CORE_MASK_V2 0xf
+#define HPRE_CLUSTER_CORE_MASK_V3 0xff
#define HPRE_AM_OOO_SHUTDOWN_ENB 0x301044
#define HPRE_AM_OOO_SHUTDOWN_ENABLE BIT(0)
@@ -86,6 +89,11 @@
#define HPRE_QM_PM_FLR BIT(11)
#define HPRE_QM_SRIOV_FLR BIT(12)
+#define HPRE_CLUSTERS_NUM(qm) \
+ (((qm)->ver >= QM_HW_V3) ? HPRE_CLUSTERS_NUM_V3 : HPRE_CLUSTERS_NUM_V2)
+#define HPRE_CLUSTER_CORE_MASK(qm) \
+ (((qm)->ver >= QM_HW_V3) ? HPRE_CLUSTER_CORE_MASK_V3 :\
+ HPRE_CLUSTER_CORE_MASK_V2)
#define HPRE_VIA_MSI_DSM 1
#define HPRE_SQE_MASK_OFFSET 8
#define HPRE_SQE_MASK_LEN 24
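
Note: the new HPRE_CLUSTERS_NUM(qm) and HPRE_CLUSTER_CORE_MASK(qm) macros fold the per-revision constants into a single version dispatch, so call sites stay revision-agnostic. A standalone model; the QM_HW_V* values mirror qm.h, where they come from the PCI revision ID:

#include <stdio.h>

enum { QM_HW_V1 = 0x20, QM_HW_V2 = 0x21, QM_HW_V3 = 0x30 };

struct hisi_qm { int ver; };

#define HPRE_CLUSTERS_NUM(qm)      (((qm)->ver >= QM_HW_V3) ? 1 : 4)
#define HPRE_CLUSTER_CORE_MASK(qm) (((qm)->ver >= QM_HW_V3) ? 0xffU : 0xfU)

int main(void)
{
	struct hisi_qm v2 = { .ver = QM_HW_V2 }, v3 = { .ver = QM_HW_V3 };

	/* v2: four clusters of four cores; v3: one wider cluster */
	printf("v2: %d clusters, core mask %#x\n",
	       HPRE_CLUSTERS_NUM(&v2), HPRE_CLUSTER_CORE_MASK(&v2));
	printf("v3: %d clusters, core mask %#x\n",
	       HPRE_CLUSTERS_NUM(&v3), HPRE_CLUSTER_CORE_MASK(&v3));
	return 0;
}
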
@@ -129,7 +137,11 @@ static const struct hpre_hw_error hpre_hw_errors[] = {
{ .int_msk = BIT(9), .msg = "cluster4_shb_timeout_int_set" },
{ .int_msk = GENMASK(15, 10), .msg = "ooo_rdrsp_err_int_set" },
{ .int_msk = GENMASK(21, 16), .msg = "ooo_wrrsp_err_int_set" },
- { /* sentinel */ }
+ { .int_msk = BIT(22), .msg = "pt_rng_timeout_int_set"},
+ { .int_msk = BIT(23), .msg = "sva_fsm_timeout_int_set"},
+ {
+ /* sentinel */
+ }
};
static const u64 hpre_cluster_offsets[] = {
@@ -178,6 +190,19 @@ static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = {
"invalid_req_cnt"
};
+static const struct kernel_param_ops hpre_uacce_mode_ops = {
+ .set = uacce_mode_set,
+ .get = param_get_int,
+};
+
+/*
+ * uacce_mode = 0 means hpre only registers to crypto,
+ * uacce_mode = 1 means hpre registers to both crypto and uacce.
+ */
+static u32 uacce_mode = UACCE_MODE_NOUACCE;
+module_param_cb(uacce_mode, &hpre_uacce_mode_ops, &uacce_mode, 0444);
+MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
+
static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
return q_num_set(val, kp, HPRE_PCI_DEVICE_ID);
@@ -214,6 +239,30 @@ struct hisi_qp *hpre_create_qp(void)
return NULL;
}
+static void hpre_pasid_enable(struct hisi_qm *qm)
+{
+ u32 val;
+
+ val = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
+ val |= BIT(HPRE_PASID_EN_BIT);
+ writel_relaxed(val, qm->io_base + HPRE_DATA_RUSER_CFG);
+ val = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
+ val |= BIT(HPRE_PASID_EN_BIT);
+ writel_relaxed(val, qm->io_base + HPRE_DATA_WUSER_CFG);
+}
+
+static void hpre_pasid_disable(struct hisi_qm *qm)
+{
+ u32 val;
+
+ val = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
+ val &= ~BIT(HPRE_PASID_EN_BIT);
+ writel_relaxed(val, qm->io_base + HPRE_DATA_RUSER_CFG);
+ val = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
+ val &= ~BIT(HPRE_PASID_EN_BIT);
+ writel_relaxed(val, qm->io_base + HPRE_DATA_WUSER_CFG);
+}
+
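
Note: hpre_pasid_enable()/hpre_pasid_disable() above are a plain MMIO read-modify-write of one enable bit in the read- and write-channel user-config registers. A userspace model of the toggle; HPRE_PASID_EN_BIT is defined elsewhere in hpre_main.c, so 9 below is a placeholder, and the two helpers stand in for readl_relaxed()/writel_relaxed():

#include <assert.h>
#include <stdint.h>

#define BIT(n)            (1U << (n))
#define HPRE_PASID_EN_BIT 9 /* placeholder value */

static uint32_t ruser_cfg, wuser_cfg; /* fake HPRE_DATA_[RW]USER_CFG */

static uint32_t reg_read(uint32_t *reg) { return *reg; }
static void reg_write(uint32_t val, uint32_t *reg) { *reg = val; }

static void pasid_set(uint32_t *reg, int enable)
{
	uint32_t val = reg_read(reg);

	if (enable)
		val |= BIT(HPRE_PASID_EN_BIT);
	else
		val &= ~BIT(HPRE_PASID_EN_BIT);
	reg_write(val, reg);
}

int main(void)
{
	pasid_set(&ruser_cfg, 1);
	pasid_set(&wuser_cfg, 1);
	assert(ruser_cfg & BIT(HPRE_PASID_EN_BIT));
	pasid_set(&ruser_cfg, 0);
	assert(!(ruser_cfg & BIT(HPRE_PASID_EN_BIT)));
	return 0;
}
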
static int hpre_cfg_by_dsm(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
@@ -238,8 +287,40 @@ static int hpre_cfg_by_dsm(struct hisi_qm *qm)
return 0;
}
+static int hpre_set_cluster(struct hisi_qm *qm)
+{
+ u32 cluster_core_mask = HPRE_CLUSTER_CORE_MASK(qm);
+ u8 clusters_num = HPRE_CLUSTERS_NUM(qm);
+ struct device *dev = &qm->pdev->dev;
+ unsigned long offset;
+ u32 val = 0;
+ int ret, i;
+
+ for (i = 0; i < clusters_num; i++) {
+ offset = i * HPRE_CLSTR_ADDR_INTRVL;
+
+ /* clusters initiating */
+ writel(cluster_core_mask,
+ HPRE_ADDR(qm, offset + HPRE_CORE_ENB));
+ writel(0x1, HPRE_ADDR(qm, offset + HPRE_CORE_INI_CFG));
+ ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, offset +
+ HPRE_CORE_INI_STATUS), val,
+ ((val & cluster_core_mask) ==
+ cluster_core_mask),
+ HPRE_REG_RD_INTVRL_US,
+ HPRE_REG_RD_TMOUT_US);
+ if (ret) {
+ dev_err(dev,
+ "cluster %d int st status timeout!\n", i);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
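
Note: cluster bring-up above writes the core-enable mask, kicks INI_CFG, then polls INI_STATUS until every enabled core reports ready or the timeout expires; readl_relaxed_poll_timeout() hides that loop. A plain-C rendering of its semantics, with a fake register that is already ready:

#include <errno.h>
#include <stdint.h>
#include <unistd.h>

/* poll *reg until (*reg & mask) == mask, like readl_relaxed_poll_timeout() */
static int poll_ready(volatile uint32_t *reg, uint32_t mask,
		      unsigned int interval_us, unsigned int timeout_us)
{
	unsigned int waited = 0;

	while ((*reg & mask) != mask) {
		if (waited >= timeout_us)
			return -ETIMEDOUT;
		usleep(interval_us);
		waited += interval_us;
	}
	return 0;
}

int main(void)
{
	volatile uint32_t ini_status = 0xf; /* all four v2 cores report ready */

	return poll_ready(&ini_status, 0xf, 10, 1000) ? 1 : 0;
}
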
/*
- * For Hi1620, we shoul disable FLR triggered by hardware (BME/PM/SRIOV).
+ * For Kunpeng 920, we should disable FLR triggered by hardware (BME/PM/SRIOV).
* Or it may stay in D3 state when we bind and unbind hpre quickly,
* as it does FLR triggered by hardware.
*/
@@ -257,9 +338,8 @@ static void disable_flr_of_bme(struct hisi_qm *qm)
static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- unsigned long offset;
- int ret, i;
u32 val;
+ int ret;
writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_ARUSER_M_CFG_ENABLE));
writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_AWUSER_M_CFG_ENABLE));
@@ -270,11 +350,15 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
val |= BIT(HPRE_TIMEOUT_ABNML_BIT);
writel_relaxed(val, HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK));
- writel(0x1, HPRE_ADDR(qm, HPRE_TYPES_ENB));
+ if (qm->ver >= QM_HW_V3)
+ writel(HPRE_RSA_ENB | HPRE_ECC_ENB,
+ HPRE_ADDR(qm, HPRE_TYPES_ENB));
+ else
+ writel(HPRE_RSA_ENB, HPRE_ADDR(qm, HPRE_TYPES_ENB));
+
writel(HPRE_QM_VFG_AX_MASK, HPRE_ADDR(qm, HPRE_VFG_AXCACHE));
writel(0x0, HPRE_ADDR(qm, HPRE_BD_ENDIAN));
writel(0x0, HPRE_ADDR(qm, HPRE_INT_MASK));
- writel(0x0, HPRE_ADDR(qm, HPRE_RAS_ECC_1BIT_TH));
writel(0x0, HPRE_ADDR(qm, HPRE_POISON_BYPASS));
writel(0x0, HPRE_ADDR(qm, HPRE_COMM_CNT_CLR_CE));
writel(0x0, HPRE_ADDR(qm, HPRE_ECC_BYPASS));
@@ -291,37 +375,29 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
return -ETIMEDOUT;
}
- for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
- offset = i * HPRE_CLSTR_ADDR_INTRVL;
+ ret = hpre_set_cluster(qm);
+ if (ret)
+ return -ETIMEDOUT;
- /* clusters initiating */
- writel(HPRE_CLUSTER_CORE_MASK,
- HPRE_ADDR(qm, offset + HPRE_CORE_ENB));
- writel(0x1, HPRE_ADDR(qm, offset + HPRE_CORE_INI_CFG));
- ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, offset +
- HPRE_CORE_INI_STATUS), val,
- ((val & HPRE_CLUSTER_CORE_MASK) ==
- HPRE_CLUSTER_CORE_MASK),
- HPRE_REG_RD_INTVRL_US,
- HPRE_REG_RD_TMOUT_US);
- if (ret) {
- dev_err(dev,
- "cluster %d int st status timeout!\n", i);
- return -ETIMEDOUT;
- }
- }
+ /* This setting is only needed by Kunpeng 920. */
+ if (qm->ver == QM_HW_V2) {
+ ret = hpre_cfg_by_dsm(qm);
+ if (ret)
+ dev_err(dev, "acpi_evaluate_dsm err.\n");
- ret = hpre_cfg_by_dsm(qm);
- if (ret)
- dev_err(dev, "acpi_evaluate_dsm err.\n");
+ disable_flr_of_bme(qm);
- disable_flr_of_bme(qm);
+ /* Enable data buffer pasid */
+ if (qm->use_sva)
+ hpre_pasid_enable(qm);
+ }
return ret;
}
static void hpre_cnt_regs_clear(struct hisi_qm *qm)
{
+ u8 clusters_num = HPRE_CLUSTERS_NUM(qm);
unsigned long offset;
int i;
@@ -330,7 +406,7 @@ static void hpre_cnt_regs_clear(struct hisi_qm *qm)
writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
/* clear clusterX/cluster_ctrl */
- for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
+ for (i = 0; i < clusters_num; i++) {
offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
}
@@ -629,13 +705,14 @@ static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)
static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
{
+ u8 clusters_num = HPRE_CLUSTERS_NUM(qm);
struct device *dev = &qm->pdev->dev;
char buf[HPRE_DBGFS_VAL_MAX_LEN];
struct debugfs_regset32 *regset;
struct dentry *tmp_d;
int i, ret;
- for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
+ for (i = 0; i < clusters_num; i++) {
ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
if (ret < 0)
return -EINVAL;
@@ -734,6 +811,11 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
return -EINVAL;
}
+ if (pdev->revision >= QM_HW_V3)
+ qm->algs = "rsa\ndh\necdh\nx25519\nx448\necdsa\nsm2\n";
+ else
+ qm->algs = "rsa\ndh\n";
+ qm->mode = uacce_mode;
qm->pdev = pdev;
qm->ver = pdev->revision;
qm->sqe_size = HPRE_SQE_SIZE;
@@ -799,6 +881,7 @@ static const struct hisi_qm_err_ini hpre_err_ini = {
.fe = 0,
.ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR |
HPRE_OOO_ECC_2BIT_ERR,
+ .dev_ce_mask = HPRE_HAC_RAS_CE_ENABLE,
.msi_wr_port = HPRE_WR_MSI_PORT,
.acpi_rst = "HRST",
}
@@ -872,6 +955,14 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_with_qm_start;
}
+ if (qm->uacce) {
+ ret = uacce_register(qm->uacce);
+ if (ret) {
+ pci_err(pdev, "failed to register uacce (%d)!\n", ret);
+ goto err_with_alg_register;
+ }
+ }
+
if (qm->fun_type == QM_HW_PF && vfs_num) {
ret = hisi_qm_sriov_enable(pdev, vfs_num);
if (ret < 0)
@@ -904,20 +995,24 @@ static void hpre_remove(struct pci_dev *pdev)
hisi_qm_wait_task_finish(qm, &hpre_devices);
hisi_qm_alg_unregister(qm, &hpre_devices);
if (qm->fun_type == QM_HW_PF && qm->vfs_num) {
- ret = hisi_qm_sriov_disable(pdev, qm->is_frozen);
+ ret = hisi_qm_sriov_disable(pdev, true);
if (ret) {
pci_err(pdev, "Disable SRIOV fail!\n");
return;
}
}
+
+ hpre_debugfs_exit(qm);
+ hisi_qm_stop(qm, QM_NORMAL);
+
if (qm->fun_type == QM_HW_PF) {
+ if (qm->use_sva && qm->ver == QM_HW_V2)
+ hpre_pasid_disable(qm);
hpre_cnt_regs_clear(qm);
qm->debug.curr_qm_qp_num = 0;
+ hisi_qm_dev_err_uninit(qm);
}
- hpre_debugfs_exit(qm);
- hisi_qm_stop(qm, QM_NORMAL);
- hisi_qm_dev_err_uninit(qm);
hisi_qm_uninit(qm);
}
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index f21ccae0e8ea..13cb4216561a 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -54,6 +54,8 @@
#define QM_SQ_PRIORITY_SHIFT 0
#define QM_SQ_ORDERS_SHIFT 4
#define QM_SQ_TYPE_SHIFT 8
+#define QM_QC_PASID_ENABLE 0x1
+#define QM_QC_PASID_ENABLE_SHIFT 7
#define QM_SQ_TYPE_MASK GENMASK(3, 0)
#define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc)->w11) >> 6) & 0x1)
@@ -120,7 +122,7 @@
#define QM_CQC_VFT_VALID (1ULL << 28)
#define QM_SQC_VFT_BASE_SHIFT_V2 28
-#define QM_SQC_VFT_BASE_MASK_V2 GENMASK(5, 0)
+#define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0)
#define QM_SQC_VFT_NUM_SHIFT_V2 45
#define QM_SQC_VFT_NUM_MASK_v2 GENMASK(9, 0)
@@ -147,7 +149,6 @@
#define QM_RAS_CE_TIMES_PER_IRQ 1
#define QM_RAS_MSI_INT_SEL 0x1040f4
-#define QM_DEV_RESET_FLAG 0
#define QM_RESET_WAIT_TIMEOUT 400
#define QM_PEH_VENDOR_ID 0x1000d8
#define ACC_VENDOR_ID_VALUE 0x5a5a
@@ -185,6 +186,10 @@
#define QM_SQE_ADDR_MASK GENMASK(7, 0)
#define QM_EQ_DEPTH (1024 * 2)
+#define QM_DRIVER_REMOVING 0
+#define QM_RST_SCHED 1
+#define QM_RESETTING 2
+
#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
(((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
@@ -619,6 +624,9 @@ static void qm_cq_head_update(struct hisi_qp *qp)
static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
{
+ if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP))
+ return;
+
if (qp->event_cb) {
qp->event_cb(qp);
return;
@@ -717,7 +725,7 @@ static irqreturn_t qm_aeq_irq(int irq, void *data)
dev_err(&qm->pdev->dev, "%s overflow\n",
qm_fifo_overflow[type]);
else
- dev_err(&qm->pdev->dev, "unknown error type %d\n",
+ dev_err(&qm->pdev->dev, "unknown error type %u\n",
type);
if (qm->status.aeq_head == QM_Q_DEPTH - 1) {
@@ -1121,7 +1129,7 @@ static int dump_show(struct hisi_qm *qm, void *info,
dev_info(dev, "%s DUMP\n", info_name);
for (i = 0; i < info_size; i += BYTE_PER_DW) {
- pr_info("DW%d: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
+ pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
info_buf[i], info_buf[i + 1UL],
info_buf[i + 2UL], info_buf[i + 3UL]);
}
@@ -1154,7 +1162,7 @@ static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
ret = kstrtou32(s, 0, &qp_id);
if (ret || qp_id >= qm->qp_num) {
- dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1);
+ dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
return -EINVAL;
}
@@ -1200,7 +1208,7 @@ static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
ret = kstrtou32(s, 0, &qp_id);
if (ret || qp_id >= qm->qp_num) {
- dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1);
+ dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
return -EINVAL;
}
@@ -1279,7 +1287,7 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s,
ret = kstrtou32(presult, 0, q_id);
if (ret || *q_id >= qp_num) {
- dev_err(dev, "Please input qp num (0-%d)", qp_num - 1);
+ dev_err(dev, "Please input qp num (0-%u)", qp_num - 1);
return -EINVAL;
}
@@ -1604,7 +1612,7 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
{
- u32 error_status, tmp;
+ u32 error_status, tmp, val;
/* read err sts */
tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
@@ -1615,9 +1623,13 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
qm->err_status.is_qm_ecc_mbit = true;
qm_log_hw_error(qm, error_status);
- if (error_status == QM_DB_RANDOM_INVALID) {
+ val = error_status | QM_DB_RANDOM_INVALID | QM_BASE_CE;
+ /* ce error does not need to be reset */
+ if (val == (QM_DB_RANDOM_INVALID | QM_BASE_CE)) {
writel(error_status, qm->io_base +
QM_ABNORMAL_INT_SOURCE);
+ writel(qm->err_ini->err_info.nfe,
+ qm->io_base + QM_RAS_NFE_ENABLE);
return ACC_ERR_RECOVERED;
}
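
Note: "(error_status | ce_bits) == ce_bits" above is a subset test: it holds exactly when every pending error bit lies inside the correctable set, in which case the handler clears the interrupt source and reports the error as recovered instead of scheduling a reset. The idiom, checked standalone:

#include <assert.h>
#include <stdint.h>

/* true iff every bit set in status also lies inside mask */
static int only_bits_in(uint32_t status, uint32_t mask)
{
	return (status | mask) == mask;
}

int main(void)
{
	uint32_t ce_mask = (1U << 0) | (1U << 10); /* illustrative CE bits */

	assert(only_bits_in(1U << 0, ce_mask));                /* pure CE: recover */
	assert(!only_bits_in((1U << 0) | (1U << 3), ce_mask)); /* NFE too: reset */
	return 0;
}
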
@@ -1685,6 +1697,7 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
qp->req_cb = NULL;
qp->qp_id = qp_id;
qp->alg_type = alg_type;
+ qp->is_in_kernel = true;
qm->qp_in_used++;
atomic_set(&qp->qp_status.flags, QP_INIT);
@@ -1747,12 +1760,6 @@ static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL);
if (!sqc)
return -ENOMEM;
- sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, sqc_dma)) {
- kfree(sqc);
- return -ENOMEM;
- }
INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
if (ver == QM_HW_V1) {
@@ -1765,6 +1772,17 @@ static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
sqc->cq_num = cpu_to_le16(qp_id);
sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
+ if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
+ sqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE <<
+ QM_QC_PASID_ENABLE_SHIFT);
+
+ sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, sqc_dma)) {
+ kfree(sqc);
+ return -ENOMEM;
+ }
+
ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
kfree(sqc);
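
Note: the "fix use of 'dma_map_single'" change above moves the mapping below the CPU stores that fill the context. On non-coherent systems dma_map_single(..., DMA_TO_DEVICE) is the point where caches are cleaned for the device, so stores issued after the map may never become visible to hardware. A toy model where mapping "publishes" the buffer, the way a cache clean does:

#include <assert.h>
#include <string.h>

static char dev_view[16]; /* what the device would see */

/* toy dma_map_single(): snapshot the CPU buffer into the device view */
static void toy_dma_map(const char *cpu_buf)
{
	memcpy(dev_view, cpu_buf, sizeof(dev_view));
}

int main(void)
{
	char sqc[16] = { 0 };

	/* buggy order: map first, write later; the device sees stale zeroes */
	toy_dma_map(sqc);
	sqc[0] = 42;
	assert(dev_view[0] == 0);

	/* fixed order: finish every CPU write, then map */
	toy_dma_map(sqc);
	assert(dev_view[0] == 42);
	return 0;
}
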
@@ -1784,12 +1802,6 @@ static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL);
if (!cqc)
return -ENOMEM;
- cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, cqc_dma)) {
- kfree(cqc);
- return -ENOMEM;
- }
INIT_QC_COMMON(cqc, qp->cqe_dma, pasid);
if (ver == QM_HW_V1) {
@@ -1802,6 +1814,16 @@ static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
}
cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
+ if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
+ cqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE);
+
+ cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, cqc_dma)) {
+ kfree(cqc);
+ return -ENOMEM;
+ }
+
ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
kfree(cqc);
@@ -1865,6 +1887,28 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
/**
+ * qp_stop_fail_cb() - call request cb.
+ * @qp: the qp which failed to stop.
+ *
+ * The callback function should be called whether the task completed or not.
+ */
+static void qp_stop_fail_cb(struct hisi_qp *qp)
+{
+ int qp_used = atomic_read(&qp->qp_status.used);
+ u16 cur_tail = qp->qp_status.sq_tail;
+ u16 cur_head = (cur_tail + QM_Q_DEPTH - qp_used) % QM_Q_DEPTH;
+ struct hisi_qm *qm = qp->qm;
+ u16 pos;
+ int i;
+
+ for (i = 0; i < qp_used; i++) {
+ pos = (i + cur_head) % QM_Q_DEPTH;
+ qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos));
+ atomic_dec(&qp->qp_status.used);
+ }
+}
+
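
Note: qp_stop_fail_cb() above replays the callback for requests still in flight when a queue fails to stop. With "used" requests outstanding in a ring of QM_Q_DEPTH slots ending at sq_tail, the oldest pending slot is (tail + depth - used) % depth. The index arithmetic, standalone:

#include <assert.h>
#include <stdint.h>

#define QM_Q_DEPTH 1024 /* matches the driver's queue depth */

int main(void)
{
	uint16_t tail = 3, used = 5; /* the ring has wrapped past slot 0 */
	uint16_t head = (tail + QM_Q_DEPTH - used) % QM_Q_DEPTH;
	int i;

	assert(head == 1022);
	/* pending slots, oldest first: 1022, 1023, 0, 1, 2 */
	for (i = 0; i < used; i++) {
		uint16_t pos = (i + head) % QM_Q_DEPTH;
		(void)pos; /* the driver runs qp->req_cb() on the SQE here */
	}
	return 0;
}
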
+/**
* qm_drain_qp() - Drain a qp.
* @qp: The qp we want to drain.
*
@@ -1959,6 +2003,9 @@ static int qm_stop_qp_nolock(struct hisi_qp *qp)
else
flush_work(&qp->qm->work);
+ if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
+ qp_stop_fail_cb(qp);
+
dev_dbg(dev, "stop queue %u!", qp->qp_id);
return 0;
@@ -2065,6 +2112,7 @@ static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
qp->uacce_q = q;
qp->event_cb = qm_qp_event_notifier;
qp->pasid = arg;
+ qp->is_in_kernel = false;
return 0;
}
@@ -2206,7 +2254,7 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
if (IS_ERR(uacce))
return PTR_ERR(uacce);
- if (uacce->flags & UACCE_DEV_SVA) {
+ if (uacce->flags & UACCE_DEV_SVA && qm->mode == UACCE_MODE_SVA) {
qm->use_sva = true;
} else {
/* only consider sva case */
@@ -2248,17 +2296,15 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
*/
static int qm_frozen(struct hisi_qm *qm)
{
- down_write(&qm->qps_lock);
-
- if (qm->is_frozen) {
- up_write(&qm->qps_lock);
+ if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl))
return 0;
- }
+
+ down_write(&qm->qps_lock);
if (!qm->qp_in_used) {
qm->qp_in_used = qm->qp_num;
- qm->is_frozen = true;
up_write(&qm->qps_lock);
+ set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl);
return 0;
}
@@ -2311,6 +2357,10 @@ void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
msleep(WAIT_PERIOD);
}
+ while (test_bit(QM_RST_SCHED, &qm->misc_ctl) ||
+ test_bit(QM_RESETTING, &qm->misc_ctl))
+ msleep(WAIT_PERIOD);
+
udelay(REMOVE_WAIT_DELAY);
}
EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish);
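
Note: across this series the old is_frozen bool and reset_flag word collapse into one misc_ctl bitmap with QM_DRIVER_REMOVING, QM_RST_SCHED and QM_RESETTING bits; test_and_set_bit() then guarantees at most one scheduler of the reset work without taking a lock. A C11-atomics model of that guard (the kernel uses its own bitops, not stdatomic):

#include <assert.h>
#include <stdatomic.h>

#define QM_DRIVER_REMOVING 0
#define QM_RST_SCHED       1
#define QM_RESETTING       2

static atomic_ulong misc_ctl;

/* returns the bit's previous value, like the kernel's test_and_set_bit() */
static int test_and_set_bit(int nr, atomic_ulong *addr)
{
	unsigned long old = atomic_fetch_or(addr, 1UL << nr);

	return !!(old & (1UL << nr));
}

int main(void)
{
	/* the first caller schedules the reset work; later callers back off */
	assert(test_and_set_bit(QM_RST_SCHED, &misc_ctl) == 0);
	assert(test_and_set_bit(QM_RST_SCHED, &misc_ctl) == 1);
	return 0;
}
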
@@ -2439,7 +2489,7 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
mutex_init(&qm->mailbox_lock);
init_rwsem(&qm->qps_lock);
qm->qp_in_used = 0;
- qm->is_frozen = false;
+ qm->misc_ctl = false;
}
static void hisi_qm_pci_uninit(struct hisi_qm *qm)
@@ -2558,15 +2608,9 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
dma_addr_t eqc_dma;
int ret;
- eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL); //todo
+ eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL);
if (!eqc)
return -ENOMEM;
- eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, eqc_dma)) {
- kfree(eqc);
- return -ENOMEM;
- }
eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
@@ -2574,6 +2618,13 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
eqc->dw6 = cpu_to_le32((QM_EQ_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
+ eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, eqc_dma)) {
+ kfree(eqc);
+ return -ENOMEM;
+ }
+
ret = qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE);
kfree(eqc);
@@ -2591,6 +2642,11 @@ static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL);
if (!aeqc)
return -ENOMEM;
+
+ aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
+ aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
+ aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
+
aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc),
DMA_TO_DEVICE);
if (dma_mapping_error(dev, aeqc_dma)) {
@@ -2598,10 +2654,6 @@ static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
return -ENOMEM;
}
- aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
- aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
- aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
-
ret = qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
kfree(aeqc);
@@ -2677,7 +2729,7 @@ int hisi_qm_start(struct hisi_qm *qm)
return -EPERM;
}
- dev_dbg(dev, "qm start with %d queue pairs\n", qm->qp_num);
+ dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num);
if (!qm->qp_num) {
dev_err(dev, "qp_num should not be 0\n");
@@ -3112,7 +3164,7 @@ int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
mutex_unlock(&qm_list->lock);
if (ret)
- pr_info("Failed to create qps, node[%d], alg[%d], qp[%d]!\n",
+ pr_info("Failed to create qps, node[%d], alg[%u], qp[%d]!\n",
node, alg_type, qp_num);
err:
@@ -3248,7 +3300,7 @@ EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
if (num_vfs == 0)
- return hisi_qm_sriov_disable(pdev, 0);
+ return hisi_qm_sriov_disable(pdev, false);
else
return hisi_qm_sriov_enable(pdev, num_vfs);
}
@@ -3269,12 +3321,19 @@ static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
if (err_sts & qm->err_ini->err_info.ecc_2bits_mask)
qm->err_status.is_dev_ecc_mbit = true;
- if (!qm->err_ini->log_dev_hw_err) {
- dev_err(&qm->pdev->dev, "Device doesn't support log hw error!\n");
- return ACC_ERR_NEED_RESET;
+ if (qm->err_ini->log_dev_hw_err)
+ qm->err_ini->log_dev_hw_err(qm, err_sts);
+
+ /* ce error does not need to be reset */
+ if ((err_sts | qm->err_ini->err_info.dev_ce_mask) ==
+ qm->err_ini->err_info.dev_ce_mask) {
+ if (qm->err_ini->clear_dev_hw_err_status)
+ qm->err_ini->clear_dev_hw_err_status(qm,
+ err_sts);
+
+ return ACC_ERR_RECOVERED;
}
- qm->err_ini->log_dev_hw_err(qm, err_sts);
return ACC_ERR_NEED_RESET;
}
@@ -3313,7 +3372,7 @@ pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
if (pdev->is_virtfn)
return PCI_ERS_RESULT_NONE;
- pci_info(pdev, "PCI error detected, state(=%d)!!\n", state);
+ pci_info(pdev, "PCI error detected, state(=%u)!!\n", state);
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
@@ -3465,7 +3524,7 @@ static int qm_reset_prepare_ready(struct hisi_qm *qm)
int delay = 0;
/* All reset requests need to be queued for processing */
- while (test_and_set_bit(QM_DEV_RESET_FLAG, &pf_qm->reset_flag)) {
+ while (test_and_set_bit(QM_RESETTING, &pf_qm->misc_ctl)) {
msleep(++delay);
if (delay > QM_RESET_WAIT_TIMEOUT)
return -EBUSY;
@@ -3489,6 +3548,7 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
ret = qm_vf_reset_prepare(qm, QM_SOFT_RESET);
if (ret) {
pci_err(pdev, "Fails to stop VFs!\n");
+ clear_bit(QM_RESETTING, &qm->misc_ctl);
return ret;
}
}
@@ -3496,9 +3556,12 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
ret = hisi_qm_stop(qm, QM_SOFT_RESET);
if (ret) {
pci_err(pdev, "Fails to stop QM!\n");
+ clear_bit(QM_RESETTING, &qm->misc_ctl);
return ret;
}
+ clear_bit(QM_RST_SCHED, &qm->misc_ctl);
+
return 0;
}
@@ -3736,7 +3799,7 @@ static int qm_controller_reset_done(struct hisi_qm *qm)
hisi_qm_dev_err_init(qm);
qm_restart_done(qm);
- clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag);
+ clear_bit(QM_RESETTING, &qm->misc_ctl);
return 0;
}
@@ -3749,18 +3812,23 @@ static int qm_controller_reset(struct hisi_qm *qm)
pci_info(pdev, "Controller resetting...\n");
ret = qm_controller_reset_prepare(qm);
- if (ret)
+ if (ret) {
+ clear_bit(QM_RST_SCHED, &qm->misc_ctl);
return ret;
+ }
ret = qm_soft_reset(qm);
if (ret) {
pci_err(pdev, "Controller reset failed (%d)\n", ret);
+ clear_bit(QM_RESETTING, &qm->misc_ctl);
return ret;
}
ret = qm_controller_reset_done(qm);
- if (ret)
+ if (ret) {
+ clear_bit(QM_RESETTING, &qm->misc_ctl);
return ret;
+ }
pci_info(pdev, "Controller reset complete\n");
@@ -3867,8 +3935,6 @@ static bool qm_flr_reset_complete(struct pci_dev *pdev)
return false;
}
- clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag);
-
return true;
}
@@ -3912,6 +3978,8 @@ void hisi_qm_reset_done(struct pci_dev *pdev)
flr_done:
if (qm_flr_reset_complete(pdev))
pci_info(pdev, "FLR reset complete\n");
+
+ clear_bit(QM_RESETTING, &qm->misc_ctl);
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_done);
@@ -3922,7 +3990,9 @@ static irqreturn_t qm_abnormal_irq(int irq, void *data)
atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt);
ret = qm_process_dev_error(qm);
- if (ret == ACC_ERR_NEED_RESET)
+ if (ret == ACC_ERR_NEED_RESET &&
+ !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) &&
+ !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl))
schedule_work(&qm->rst_work);
return IRQ_HANDLED;
@@ -3934,21 +4004,20 @@ static int qm_irq_register(struct hisi_qm *qm)
int ret;
ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR),
- qm_irq, IRQF_SHARED, qm->dev_name, qm);
+ qm_irq, 0, qm->dev_name, qm);
if (ret)
return ret;
if (qm->ver != QM_HW_V1) {
ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR),
- qm_aeq_irq, IRQF_SHARED, qm->dev_name, qm);
+ qm_aeq_irq, 0, qm->dev_name, qm);
if (ret)
goto err_aeq_irq;
if (qm->fun_type == QM_HW_PF) {
ret = request_irq(pci_irq_vector(pdev,
QM_ABNORMAL_EVENT_IRQ_VECTOR),
- qm_abnormal_irq, IRQF_SHARED,
- qm->dev_name, qm);
+ qm_abnormal_irq, 0, qm->dev_name, qm);
if (ret)
goto err_abonormal_irq;
}
@@ -4004,6 +4073,9 @@ int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
int flag = 0;
int ret = 0;
+ /* HW V2 does not support using uacce sva mode and hardware crypto algs at the same time */
+ if (qm->ver <= QM_HW_V2 && qm->use_sva)
+ return 0;
mutex_lock(&qm_list->lock);
if (list_empty(&qm_list->list))
@@ -4035,6 +4107,9 @@ EXPORT_SYMBOL_GPL(hisi_qm_alg_register);
*/
void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
+ if (qm->ver <= QM_HW_V2 && qm->use_sva)
+ return;
+
mutex_lock(&qm_list->lock);
list_del(&qm->list);
mutex_unlock(&qm_list->lock);
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index 8624d1288afe..54967c6b9c78 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -85,6 +85,11 @@
/* page number for queue file region */
#define QM_DOORBELL_PAGE_NR 1
+/* uacce mode of the driver */
+#define UACCE_MODE_NOUACCE 0 /* don't use uacce */
+#define UACCE_MODE_SVA 1 /* use uacce sva mode */
+#define UACCE_MODE_DESC "0 (default) means register to crypto only, 1 means register to both crypto and uacce"
+
enum qm_stop_reason {
QM_NORMAL,
QM_SOFT_RESET,
@@ -168,6 +173,7 @@ struct hisi_qm_err_info {
char *acpi_rst;
u32 msi_wr_port;
u32 ecc_2bits_mask;
+ u32 dev_ce_mask;
u32 ce;
u32 nfe;
u32 fe;
@@ -225,7 +231,7 @@ struct hisi_qm {
struct hisi_qm_status status;
const struct hisi_qm_err_ini *err_ini;
struct hisi_qm_err_status err_status;
- unsigned long reset_flag;
+ unsigned long misc_ctl; /* driver removing and reset sched */
struct rw_semaphore qps_lock;
struct idr qp_idr;
@@ -249,6 +255,7 @@ struct hisi_qm {
resource_size_t phys_base;
resource_size_t phys_size;
struct uacce_device *uacce;
+ int mode;
};
struct hisi_qp_status {
@@ -282,6 +289,7 @@ struct hisi_qp {
struct hisi_qm *qm;
bool is_resetting;
+ bool is_in_kernel;
u16 pasid;
struct uacce_queue *uacce_q;
};
@@ -299,7 +307,7 @@ static inline int q_num_set(const char *val, const struct kernel_param *kp,
if (!pdev) {
q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2);
- pr_info("No device found currently, suppose queue number is %d\n",
+ pr_info("No device found currently, suppose queue number is %u\n",
q_num);
} else {
if (pdev->revision == QM_HW_V1)
@@ -333,6 +341,27 @@ static inline int vfs_num_set(const char *val, const struct kernel_param *kp)
return param_set_int(val, kp);
}
+static inline int mode_set(const char *val, const struct kernel_param *kp)
+{
+ u32 n;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret != 0 || (n != UACCE_MODE_SVA &&
+ n != UACCE_MODE_NOUACCE))
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static inline int uacce_mode_set(const char *val, const struct kernel_param *kp)
+{
+ return mode_set(val, kp);
+}
+
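
Note: mode_set() above accepts only 0 (register to crypto alone) or 1 (crypto plus uacce), and each driver wires it through its own kernel_param_ops, so something like "modprobe hisi_zip uacce_mode=1" is validated at parse time. The validation logic lifted into plain C, with strtoul standing in for kstrtou32:

#include <assert.h>
#include <errno.h>
#include <stdlib.h>

#define UACCE_MODE_NOUACCE 0 /* don't use uacce */
#define UACCE_MODE_SVA     1 /* use uacce sva mode */

static int mode_set(const char *val)
{
	unsigned long n;
	char *end;

	if (!val)
		return -EINVAL;

	n = strtoul(val, &end, 10);
	if (end == val || *end != '\0' ||
	    (n != UACCE_MODE_SVA && n != UACCE_MODE_NOUACCE))
		return -EINVAL;

	return 0;
}

int main(void)
{
	assert(mode_set("0") == 0);
	assert(mode_set("1") == 0);
	assert(mode_set("2") == -EINVAL);
	assert(mode_set("sva") == -EINVAL);
	return 0;
}
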
static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
{
INIT_LIST_HEAD(&qm_list->list);
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index b35c1c2271a3..dc68ba76f65e 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -13,6 +13,7 @@
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/topology.h>
+#include <linux/uacce.h>
#include "sec.h"
@@ -74,6 +75,16 @@
#define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7))
+#define SEC_USER1_ENABLE_CONTEXT_SSV BIT(24)
+#define SEC_USER1_ENABLE_DATA_SSV BIT(16)
+#define SEC_USER1_WB_CONTEXT_SSV BIT(8)
+#define SEC_USER1_WB_DATA_SSV BIT(0)
+#define SEC_USER1_SVA_SET (SEC_USER1_ENABLE_CONTEXT_SSV | \
+ SEC_USER1_ENABLE_DATA_SSV | \
+ SEC_USER1_WB_CONTEXT_SSV | \
+ SEC_USER1_WB_DATA_SSV)
+#define SEC_USER1_SMMU_SVA (SEC_USER1_SMMU_NORMAL | SEC_USER1_SVA_SET)
+#define SEC_USER1_SMMU_MASK (~SEC_USER1_SVA_SET)
#define SEC_CORE_INT_STATUS_M_ECC BIT(2)
#define SEC_DELAY_10_US 10
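
Note: SEC_USER1_SVA_SET above ORs together the four per-path SSV enables and SEC_USER1_SMMU_MASK is its complement, so sec_engine_init() can clear exactly those four bits before choosing the SVA or normal layout without touching neighbouring fields. The mask algebra, checked standalone:

#include <assert.h>
#include <stdint.h>

#define BIT(n) (1U << (n))

#define SEC_USER1_ENABLE_CONTEXT_SSV BIT(24)
#define SEC_USER1_ENABLE_DATA_SSV    BIT(16)
#define SEC_USER1_WB_CONTEXT_SSV     BIT(8)
#define SEC_USER1_WB_DATA_SSV        BIT(0)
#define SEC_USER1_SVA_SET (SEC_USER1_ENABLE_CONTEXT_SSV | \
			   SEC_USER1_ENABLE_DATA_SSV | \
			   SEC_USER1_WB_CONTEXT_SSV | \
			   SEC_USER1_WB_DATA_SSV)
#define SEC_USER1_SMMU_MASK (~SEC_USER1_SVA_SET)

int main(void)
{
	uint32_t reg = 0xdeadbeef;

	assert(SEC_USER1_SVA_SET == 0x01010101);
	/* masking clears only the four SSV bits, nothing else */
	assert((reg ^ (reg & SEC_USER1_SMMU_MASK)) == (reg & SEC_USER1_SVA_SET));
	return 0;
}
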
@@ -233,6 +244,18 @@ struct hisi_qp **sec_create_qps(void)
return NULL;
}
+static const struct kernel_param_ops sec_uacce_mode_ops = {
+ .set = uacce_mode_set,
+ .get = param_get_int,
+};
+
+/*
+ * uacce_mode = 0 means sec only registers to crypto,
+ * uacce_mode = 1 means sec registers to both crypto and uacce.
+ */
+static u32 uacce_mode = UACCE_MODE_NOUACCE;
+module_param_cb(uacce_mode, &sec_uacce_mode_ops, &uacce_mode, 0444);
+MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
static const struct pci_device_id sec_dev_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
@@ -299,7 +322,11 @@ static int sec_engine_init(struct hisi_qm *qm)
writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
- reg |= SEC_USER1_SMMU_NORMAL;
+ reg &= SEC_USER1_SMMU_MASK;
+ if (qm->use_sva && qm->ver == QM_HW_V2)
+ reg |= SEC_USER1_SMMU_SVA;
+ else
+ reg |= SEC_USER1_SMMU_NORMAL;
writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
writel(SEC_SINGLE_PORT_MAX_TRANS,
@@ -725,6 +752,7 @@ static const struct hisi_qm_err_ini sec_err_ini = {
QM_ACC_WB_NOT_READY_TIMEOUT,
.fe = 0,
.ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC,
+ .dev_ce_mask = SEC_RAS_CE_ENB_MSK,
.msi_wr_port = BIT(0),
.acpi_rst = "SRST",
}
@@ -758,6 +786,8 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->pdev = pdev;
qm->ver = pdev->revision;
+ qm->algs = "cipher\ndigest\naead\n";
+ qm->mode = uacce_mode;
qm->sqe_size = SEC_SQE_SIZE;
qm->dev_name = sec_name;
@@ -885,6 +915,14 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_qm_stop;
}
+ if (qm->uacce) {
+ ret = uacce_register(qm->uacce);
+ if (ret) {
+ pci_err(pdev, "failed to register uacce (%d)!\n", ret);
+ goto err_alg_unregister;
+ }
+ }
+
if (qm->fun_type == QM_HW_PF && vfs_num) {
ret = hisi_qm_sriov_enable(pdev, vfs_num);
if (ret < 0)
@@ -912,7 +950,7 @@ static void sec_remove(struct pci_dev *pdev)
hisi_qm_wait_task_finish(qm, &sec_devices);
hisi_qm_alg_unregister(qm, &sec_devices);
if (qm->fun_type == QM_HW_PF && qm->vfs_num)
- hisi_qm_sriov_disable(pdev, qm->is_frozen);
+ hisi_qm_sriov_disable(pdev, true);
sec_debugfs_exit(qm);
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 4fb5a32bf830..02c445722445 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -66,6 +66,7 @@
#define HZIP_CORE_INT_STATUS_M_ECC BIT(1)
#define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148
#define HZIP_CORE_INT_RAS_CE_ENB 0x301160
+#define HZIP_CORE_INT_RAS_CE_ENABLE 0x1
#define HZIP_CORE_INT_RAS_NFE_ENB 0x301164
#define HZIP_CORE_INT_RAS_FE_ENB 0x301168
#define HZIP_CORE_INT_RAS_NFE_ENABLE 0x7FE
@@ -211,6 +212,19 @@ static const struct debugfs_reg32 hzip_dfx_regs[] = {
{"HZIP_DECOMP_LZ77_CURR_ST ", 0x9cull},
};
+static const struct kernel_param_ops zip_uacce_mode_ops = {
+ .set = uacce_mode_set,
+ .get = param_get_int,
+};
+
+/*
+ * uacce_mode = 0 means zip only registers to crypto,
+ * uacce_mode = 1 means zip registers to both crypto and uacce.
+ */
+static u32 uacce_mode = UACCE_MODE_NOUACCE;
+module_param_cb(uacce_mode, &zip_uacce_mode_ops, &uacce_mode, 0444);
+MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
+
static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
return q_num_set(val, kp, PCI_DEVICE_ID_ZIP_PF);
@@ -279,7 +293,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63);
- if (qm->use_sva) {
+ if (qm->use_sva && qm->ver == QM_HW_V2) {
writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_RUSER_32_63);
writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_WUSER_32_63);
} else {
@@ -314,7 +328,8 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_SOURCE);
/* configure error type */
- writel(0x1, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
+ writel(HZIP_CORE_INT_RAS_CE_ENABLE,
+ qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
writel(0x0, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
writel(HZIP_CORE_INT_RAS_NFE_ENABLE,
qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
@@ -714,6 +729,7 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = {
QM_ACC_WB_NOT_READY_TIMEOUT,
.fe = 0,
.ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC,
+ .dev_ce_mask = HZIP_CORE_INT_RAS_CE_ENABLE,
.msi_wr_port = HZIP_WR_PORT,
.acpi_rst = "ZRST",
}
@@ -752,6 +768,7 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->pdev = pdev;
qm->ver = pdev->revision;
qm->algs = "zlib\ngzip";
+ qm->mode = uacce_mode;
qm->sqe_size = HZIP_SQE_SIZE;
qm->dev_name = hisi_zip_name;
@@ -887,7 +904,7 @@ static void hisi_zip_remove(struct pci_dev *pdev)
hisi_qm_alg_unregister(qm, &zip_devices);
if (qm->fun_type == QM_HW_PF && qm->vfs_num)
- hisi_qm_sriov_disable(pdev, qm->is_frozen);
+ hisi_qm_sriov_disable(pdev, true);
hisi_zip_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);