| author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-11-26 06:49:58 +0300 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-11-26 06:49:58 +0300 |
| commit | 642356cb5f4a8c82b5ca5ebac288c327d10df236 (patch) | |
| tree | 85bdf911a1307d33838449cb8209b828dcfef1c7 /drivers/crypto/hisilicon | |
| parent | f838767555d40f29bc4771c5c8cc63193094b7cc (diff) | |
| parent | 4ee812f6143d78d8ba1399671d78c8d78bf2817c (diff) | |
| download | linux-642356cb5f4a8c82b5ca5ebac288c327d10df236.tar.xz | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
"API:
- Add library interfaces of certain crypto algorithms for WireGuard
- Remove the obsolete ablkcipher and blkcipher interfaces
- Move add_early_randomness() out of rng_mutex
Algorithms:
- Add blake2b shash algorithm
- Add blake2s shash algorithm
- Add curve25519 kpp algorithm
- Implement 4 way interleave in arm64/gcm-ce
- Implement ciphertext stealing in powerpc/spe-xts
- Add Eric Biggers's scalar accelerated ChaCha code for ARM
- Add accelerated 32r2 code from Zinc for MIPS
- Add OpenSSL/CRYPTOGAMS poly1305 implementation for ARM and MIPS
Drivers:
- Fix entropy reading failures in ks-sa
- Add support for sam9x60 in atmel
- Add crypto accelerator for amlogic GXL
- Add sun8i-ce Crypto Engine
- Add sun8i-ss cryptographic offloader
- Add a host of algorithms to inside-secure
- Add NPCM RNG driver
- Add HiSilicon HPRE accelerator
- Add HiSilicon TRNG driver"
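
The first API bullet above refers to the new one-shot ChaCha20-Poly1305 library interface (include/crypto/chacha20poly1305.h) that WireGuard can call directly, with no tfm allocation or scatterlist setup. A minimal, hedged sketch of a kernel-side caller follows; the helper name and buffer handling are invented for illustration and are not code from this merge.

```c
#include <crypto/chacha20poly1305.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

/*
 * Hedged sketch of the one-shot library interface pulled in for WireGuard.
 * Only chacha20poly1305_encrypt() and the size constants come from the new
 * <crypto/chacha20poly1305.h> header; everything else is illustrative.
 */
static int example_seal(const u8 key[CHACHA20POLY1305_KEY_SIZE], u64 nonce,
			const u8 *msg, size_t msg_len)
{
	size_t out_len = msg_len + CHACHA20POLY1305_AUTHTAG_SIZE;
	u8 *out = kmalloc(out_len, GFP_KERNEL);

	if (!out)
		return -ENOMEM;

	/* Ciphertext followed by the 16-byte Poly1305 tag is written to 'out'. */
	chacha20poly1305_encrypt(out, msg, msg_len, NULL, 0, nonce, key);

	/* ... hand 'out' to the transport, then release it ... */
	kfree(out);

	return 0;
}
```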
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (285 commits)
crypto: vmx - Avoid weird build failures
crypto: lib/chacha20poly1305 - use chacha20_crypt()
crypto: x86/chacha - only unregister algorithms if registered
crypto: chacha_generic - remove unnecessary setkey() functions
crypto: amlogic - enable working on big endian kernel
crypto: sun8i-ce - enable working on big endian
crypto: mips/chacha - select CRYPTO_SKCIPHER, not CRYPTO_BLKCIPHER
hwrng: ks-sa - Enable COMPILE_TEST
crypto: essiv - remove redundant null pointer check before kfree
crypto: atmel-aes - Change data type for "lastc" buffer
crypto: atmel-tdes - Set the IV after {en,de}crypt
crypto: sun4i-ss - fix big endian issues
crypto: sun4i-ss - hide the Invalid keylen message
crypto: sun4i-ss - use crypto_ahash_digestsize
crypto: sun4i-ss - remove dependency on not 64BIT
crypto: sun4i-ss - Fix 64-bit size_t warnings on sun4i-ss-hash.c
MAINTAINERS: Add maintainer for HiSilicon SEC V2 driver
crypto: hisilicon - add DebugFS for HiSilicon SEC
Documentation: add DebugFS doc for HiSilicon SEC
crypto: hisilicon - add SRIOV for HiSilicon SEC
...
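
Several commits in the shortlog above finish the blkcipher removal called out in the API section: drivers now `select CRYPTO_SKCIPHER` (the renamed Kconfig symbol, visible in the Kconfig hunk below) and in-kernel users go through the skcipher API. A hedged sketch of a synchronous skcipher caller after that conversion; "cbc(aes)" and the key/iv/data buffers are placeholder assumptions.

```c
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/*
 * Hedged sketch of the skcipher interface that replaced blkcipher/ablkcipher.
 * The algorithm name and buffers are placeholders; 'len' is assumed to be a
 * multiple of the cipher block size for CBC.
 */
static int example_encrypt(const u8 *key, unsigned int keylen,
			   u8 *iv, u8 *data, unsigned int len)
{
	struct crypto_sync_skcipher *tfm;
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_sync_skcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out;

	{
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_sync_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		sg_init_one(&sg, data, len);
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);

		/* In-place CBC encryption of 'data'. */
		ret = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
	}
out:
	crypto_free_sync_skcipher(tfm);
	return ret;
}
```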
Diffstat (limited to 'drivers/crypto/hisilicon')
-rw-r--r-- | drivers/crypto/hisilicon/Kconfig | 45
-rw-r--r-- | drivers/crypto/hisilicon/Makefile | 6
-rw-r--r-- | drivers/crypto/hisilicon/hpre/Makefile | 2
-rw-r--r-- | drivers/crypto/hisilicon/hpre/hpre.h | 83
-rw-r--r-- | drivers/crypto/hisilicon/hpre/hpre_crypto.c | 1137
-rw-r--r-- | drivers/crypto/hisilicon/hpre/hpre_main.c | 1052
-rw-r--r-- | drivers/crypto/hisilicon/qm.c | 142
-rw-r--r-- | drivers/crypto/hisilicon/qm.h | 17
-rw-r--r-- | drivers/crypto/hisilicon/sec2/Makefile | 2
-rw-r--r-- | drivers/crypto/hisilicon/sec2/sec.h | 156
-rw-r--r-- | drivers/crypto/hisilicon/sec2/sec_crypto.c | 889
-rw-r--r-- | drivers/crypto/hisilicon/sec2/sec_crypto.h | 198
-rw-r--r-- | drivers/crypto/hisilicon/sec2/sec_main.c | 1095
-rw-r--r-- | drivers/crypto/hisilicon/sgl.c | 184
-rw-r--r-- | drivers/crypto/hisilicon/sgl.h | 24
-rw-r--r-- | drivers/crypto/hisilicon/zip/zip.h | 1
-rw-r--r-- | drivers/crypto/hisilicon/zip/zip_crypto.c | 46
-rw-r--r-- | drivers/crypto/hisilicon/zip/zip_main.c | 294
18 files changed, 5063 insertions, 310 deletions
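
The diffstat is dominated by the new HPRE driver. As the diff below shows, hpre_crypto.c registers an akcipher ("rsa", driver name "hpre-rsa") and, behind CONFIG_CRYPTO_DH, a kpp ("dh", driver name "hpre-dh"), both at cra_priority 1000, so generic requests for these algorithms will normally resolve to the accelerator. As a hedged illustration of how another kernel module would reach that implementation through the crypto API (the helper below is invented for this example):

```c
#include <crypto/kpp.h>
#include <linux/err.h>
#include <linux/kernel.h>

/*
 * Hedged sketch: pin the HPRE-backed DH transform by its driver name.
 * Requesting plain "dh" would usually also land on hpre-dh because of its
 * high cra_priority; the explicit name is used here only for illustration.
 */
static int example_alloc_hpre_dh(void)
{
	struct crypto_kpp *tfm;

	tfm = crypto_alloc_kpp("hpre-dh", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	pr_info("allocated kpp driver: %s\n",
		crypto_tfm_alg_driver_name(crypto_kpp_tfm(tfm)));

	crypto_free_kpp(tfm);
	return 0;
}
```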
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig index ebaf91e0146d..c0e7a85fe129 100644 --- a/drivers/crypto/hisilicon/Kconfig +++ b/drivers/crypto/hisilicon/Kconfig @@ -2,7 +2,7 @@ config CRYPTO_DEV_HISI_SEC tristate "Support for Hisilicon SEC crypto block cipher accelerator" - select CRYPTO_BLKCIPHER + select CRYPTO_SKCIPHER select CRYPTO_ALGAPI select CRYPTO_LIB_DES select SG_SPLIT @@ -14,26 +14,47 @@ config CRYPTO_DEV_HISI_SEC To compile this as a module, choose M here: the module will be called hisi_sec. +config CRYPTO_DEV_HISI_SEC2 + tristate "Support for HiSilicon SEC2 crypto block cipher accelerator" + select CRYPTO_BLKCIPHER + select CRYPTO_ALGAPI + select CRYPTO_LIB_DES + select CRYPTO_DEV_HISI_QM + depends on PCI && PCI_MSI + depends on ARM64 || (COMPILE_TEST && 64BIT) + help + Support for HiSilicon SEC Engine of version 2 in crypto subsystem. + It provides AES, SM4, and 3DES algorithms with ECB + CBC, and XTS cipher mode. + + To compile this as a module, choose M here: the module + will be called hisi_sec2. + config CRYPTO_DEV_HISI_QM tristate - depends on ARM64 && PCI && PCI_MSI + depends on ARM64 || COMPILE_TEST + depends on PCI && PCI_MSI help HiSilicon accelerator engines use a common queue management interface. Specific engine driver may use this module. -config CRYPTO_HISI_SGL - tristate - depends on ARM64 - help - HiSilicon accelerator engines use a common hardware scatterlist - interface for data format. Specific engine driver may use this - module. - config CRYPTO_DEV_HISI_ZIP tristate "Support for HiSilicon ZIP accelerator" - depends on ARM64 && PCI && PCI_MSI + depends on PCI && PCI_MSI + depends on ARM64 || (COMPILE_TEST && 64BIT) + depends on !CPU_BIG_ENDIAN || COMPILE_TEST select CRYPTO_DEV_HISI_QM - select CRYPTO_HISI_SGL select SG_SPLIT help Support for HiSilicon ZIP Driver + +config CRYPTO_DEV_HISI_HPRE + tristate "Support for HISI HPRE accelerator" + depends on PCI && PCI_MSI + depends on ARM64 || (COMPILE_TEST && 64BIT) + select CRYPTO_DEV_HISI_QM + select CRYPTO_DH + select CRYPTO_RSA + help + Support for HiSilicon HPRE(High Performance RSA Engine) + accelerator, which can accelerate RSA and DH algorithms. diff --git a/drivers/crypto/hisilicon/Makefile b/drivers/crypto/hisilicon/Makefile index 45a279741126..7f5f74c72baa 100644 --- a/drivers/crypto/hisilicon/Makefile +++ b/drivers/crypto/hisilicon/Makefile @@ -1,5 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_CRYPTO_DEV_HISI_HPRE) += hpre/ obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += sec/ -obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += qm.o -obj-$(CONFIG_CRYPTO_HISI_SGL) += sgl.o +obj-$(CONFIG_CRYPTO_DEV_HISI_SEC2) += sec2/ +obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += hisi_qm.o +hisi_qm-objs = qm.o sgl.o obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += zip/ diff --git a/drivers/crypto/hisilicon/hpre/Makefile b/drivers/crypto/hisilicon/hpre/Makefile new file mode 100644 index 000000000000..4fd32b789e1e --- /dev/null +++ b/drivers/crypto/hisilicon/hpre/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_CRYPTO_DEV_HISI_HPRE) += hisi_hpre.o +hisi_hpre-objs = hpre_main.o hpre_crypto.o diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h new file mode 100644 index 000000000000..ddf13ea9862a --- /dev/null +++ b/drivers/crypto/hisilicon/hpre/hpre.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019 HiSilicon Limited. 
*/ +#ifndef __HISI_HPRE_H +#define __HISI_HPRE_H + +#include <linux/list.h> +#include "../qm.h" + +#define HPRE_SQE_SIZE sizeof(struct hpre_sqe) +#define HPRE_PF_DEF_Q_NUM 64 +#define HPRE_PF_DEF_Q_BASE 0 + +enum { + HPRE_CLUSTER0, + HPRE_CLUSTER1, + HPRE_CLUSTER2, + HPRE_CLUSTER3, + HPRE_CLUSTERS_NUM, +}; + +enum hpre_ctrl_dbgfs_file { + HPRE_CURRENT_QM, + HPRE_CLEAR_ENABLE, + HPRE_CLUSTER_CTRL, + HPRE_DEBUG_FILE_NUM, +}; + +#define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM - 1) + +struct hpre_debugfs_file { + int index; + enum hpre_ctrl_dbgfs_file type; + spinlock_t lock; + struct hpre_debug *debug; +}; + +/* + * One HPRE controller has one PF and multiple VFs, some global configurations + * which PF has need this structure. + * Just relevant for PF. + */ +struct hpre_debug { + struct dentry *debug_root; + struct hpre_debugfs_file files[HPRE_DEBUGFS_FILE_NUM]; +}; + +struct hpre { + struct hisi_qm qm; + struct list_head list; + struct hpre_debug debug; + u32 num_vfs; + unsigned long status; +}; + +enum hpre_alg_type { + HPRE_ALG_NC_NCRT = 0x0, + HPRE_ALG_NC_CRT = 0x1, + HPRE_ALG_KG_STD = 0x2, + HPRE_ALG_KG_CRT = 0x3, + HPRE_ALG_DH_G2 = 0x4, + HPRE_ALG_DH = 0x5, +}; + +struct hpre_sqe { + __le32 dw0; + __u8 task_len1; + __u8 task_len2; + __u8 mrttest_num; + __u8 resv1; + __le64 key; + __le64 in; + __le64 out; + __le16 tag; + __le16 resv2; +#define _HPRE_SQE_ALIGN_EXT 7 + __le32 rsvd1[_HPRE_SQE_ALIGN_EXT]; +}; + +struct hpre *hpre_find_device(int node); +int hpre_algs_register(void); +void hpre_algs_unregister(void); + +#endif diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c new file mode 100644 index 000000000000..98f037e6ea3e --- /dev/null +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -0,0 +1,1137 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 HiSilicon Limited. 
*/ +#include <crypto/akcipher.h> +#include <crypto/dh.h> +#include <crypto/internal/akcipher.h> +#include <crypto/internal/kpp.h> +#include <crypto/internal/rsa.h> +#include <crypto/kpp.h> +#include <crypto/scatterwalk.h> +#include <linux/dma-mapping.h> +#include <linux/fips.h> +#include <linux/module.h> +#include "hpre.h" + +struct hpre_ctx; + +#define HPRE_CRYPTO_ALG_PRI 1000 +#define HPRE_ALIGN_SZ 64 +#define HPRE_BITS_2_BYTES_SHIFT 3 +#define HPRE_RSA_512BITS_KSZ 64 +#define HPRE_RSA_1536BITS_KSZ 192 +#define HPRE_CRT_PRMS 5 +#define HPRE_CRT_Q 2 +#define HPRE_CRT_P 3 +#define HPRE_CRT_INV 4 +#define HPRE_DH_G_FLAG 0x02 +#define HPRE_TRY_SEND_TIMES 100 +#define HPRE_INVLD_REQ_ID (-1) +#define HPRE_DEV(ctx) (&((ctx)->qp->qm->pdev->dev)) + +#define HPRE_SQE_ALG_BITS 5 +#define HPRE_SQE_DONE_SHIFT 30 +#define HPRE_DH_MAX_P_SZ 512 + +typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe); + +struct hpre_rsa_ctx { + /* low address: e--->n */ + char *pubkey; + dma_addr_t dma_pubkey; + + /* low address: d--->n */ + char *prikey; + dma_addr_t dma_prikey; + + /* low address: dq->dp->q->p->qinv */ + char *crt_prikey; + dma_addr_t dma_crt_prikey; + + struct crypto_akcipher *soft_tfm; +}; + +struct hpre_dh_ctx { + /* + * If base is g we compute the public key + * ya = g^xa mod p; [RFC2631 sec 2.1.1] + * else if base if the counterpart public key we + * compute the shared secret + * ZZ = yb^xa mod p; [RFC2631 sec 2.1.1] + */ + char *xa_p; /* low address: d--->n, please refer to Hisilicon HPRE UM */ + dma_addr_t dma_xa_p; + + char *g; /* m */ + dma_addr_t dma_g; +}; + +struct hpre_ctx { + struct hisi_qp *qp; + struct hpre_asym_request **req_list; + spinlock_t req_lock; + unsigned int key_sz; + bool crt_g2_mode; + struct idr req_idr; + union { + struct hpre_rsa_ctx rsa; + struct hpre_dh_ctx dh; + }; +}; + +struct hpre_asym_request { + char *src; + char *dst; + struct hpre_sqe req; + struct hpre_ctx *ctx; + union { + struct akcipher_request *rsa; + struct kpp_request *dh; + } areq; + int err; + int req_id; + hpre_cb cb; +}; + +static DEFINE_MUTEX(hpre_alg_lock); +static unsigned int hpre_active_devs; + +static int hpre_alloc_req_id(struct hpre_ctx *ctx) +{ + unsigned long flags; + int id; + + spin_lock_irqsave(&ctx->req_lock, flags); + id = idr_alloc(&ctx->req_idr, NULL, 0, QM_Q_DEPTH, GFP_ATOMIC); + spin_unlock_irqrestore(&ctx->req_lock, flags); + + return id; +} + +static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id) +{ + unsigned long flags; + + spin_lock_irqsave(&ctx->req_lock, flags); + idr_remove(&ctx->req_idr, req_id); + spin_unlock_irqrestore(&ctx->req_lock, flags); +} + +static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req) +{ + struct hpre_ctx *ctx; + int id; + + ctx = hpre_req->ctx; + id = hpre_alloc_req_id(ctx); + if (id < 0) + return -EINVAL; + + ctx->req_list[id] = hpre_req; + hpre_req->req_id = id; + + return id; +} + +static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req) +{ + struct hpre_ctx *ctx = hpre_req->ctx; + int id = hpre_req->req_id; + + if (hpre_req->req_id >= 0) { + hpre_req->req_id = HPRE_INVLD_REQ_ID; + ctx->req_list[id] = NULL; + hpre_free_req_id(ctx, id); + } +} + +static struct hisi_qp *hpre_get_qp_and_start(void) +{ + struct hisi_qp *qp; + struct hpre *hpre; + int ret; + + /* find the proper hpre device, which is near the current CPU core */ + hpre = hpre_find_device(cpu_to_node(smp_processor_id())); + if (!hpre) { + pr_err("Can not find proper hpre device!\n"); + return ERR_PTR(-ENODEV); + } + + qp = hisi_qm_create_qp(&hpre->qm, 
0); + if (IS_ERR(qp)) { + pci_err(hpre->qm.pdev, "Can not create qp!\n"); + return ERR_PTR(-ENODEV); + } + + ret = hisi_qm_start_qp(qp, 0); + if (ret < 0) { + hisi_qm_release_qp(qp); + pci_err(hpre->qm.pdev, "Can not start qp!\n"); + return ERR_PTR(-EINVAL); + } + + return qp; +} + +static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req, + struct scatterlist *data, unsigned int len, + int is_src, dma_addr_t *tmp) +{ + struct hpre_ctx *ctx = hpre_req->ctx; + struct device *dev = HPRE_DEV(ctx); + enum dma_data_direction dma_dir; + + if (is_src) { + hpre_req->src = NULL; + dma_dir = DMA_TO_DEVICE; + } else { + hpre_req->dst = NULL; + dma_dir = DMA_FROM_DEVICE; + } + *tmp = dma_map_single(dev, sg_virt(data), + len, dma_dir); + if (dma_mapping_error(dev, *tmp)) { + dev_err(dev, "dma map data err!\n"); + return -ENOMEM; + } + + return 0; +} + +static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req, + struct scatterlist *data, unsigned int len, + int is_src, dma_addr_t *tmp) +{ + struct hpre_ctx *ctx = hpre_req->ctx; + struct device *dev = HPRE_DEV(ctx); + void *ptr; + int shift; + + shift = ctx->key_sz - len; + if (shift < 0) + return -EINVAL; + + ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + if (is_src) { + scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0); + hpre_req->src = ptr; + } else { + hpre_req->dst = ptr; + } + + return 0; +} + +static int hpre_hw_data_init(struct hpre_asym_request *hpre_req, + struct scatterlist *data, unsigned int len, + int is_src, int is_dh) +{ + struct hpre_sqe *msg = &hpre_req->req; + struct hpre_ctx *ctx = hpre_req->ctx; + dma_addr_t tmp; + int ret; + + /* when the data is dh's source, we should format it */ + if ((sg_is_last(data) && len == ctx->key_sz) && + ((is_dh && !is_src) || !is_dh)) + ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp); + else + ret = hpre_prepare_dma_buf(hpre_req, data, len, + is_src, &tmp); + if (ret) + return ret; + + if (is_src) + msg->in = cpu_to_le64(tmp); + else + msg->out = cpu_to_le64(tmp); + + return 0; +} + +static void hpre_hw_data_clr_all(struct hpre_ctx *ctx, + struct hpre_asym_request *req, + struct scatterlist *dst, struct scatterlist *src) +{ + struct device *dev = HPRE_DEV(ctx); + struct hpre_sqe *sqe = &req->req; + dma_addr_t tmp; + + tmp = le64_to_cpu(sqe->in); + if (!tmp) + return; + + if (src) { + if (req->src) + dma_free_coherent(dev, ctx->key_sz, + req->src, tmp); + else + dma_unmap_single(dev, tmp, + ctx->key_sz, DMA_TO_DEVICE); + } + + tmp = le64_to_cpu(sqe->out); + if (!tmp) + return; + + if (req->dst) { + if (dst) + scatterwalk_map_and_copy(req->dst, dst, 0, + ctx->key_sz, 1); + dma_free_coherent(dev, ctx->key_sz, req->dst, tmp); + } else { + dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE); + } +} + +static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe, + void **kreq) +{ + struct hpre_asym_request *req; + int err, id, done; + +#define HPRE_NO_HW_ERR 0 +#define HPRE_HW_TASK_DONE 3 +#define HREE_HW_ERR_MASK 0x7ff +#define HREE_SQE_DONE_MASK 0x3 + id = (int)le16_to_cpu(sqe->tag); + req = ctx->req_list[id]; + hpre_rm_req_from_ctx(req); + *kreq = req; + + err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) & + HREE_HW_ERR_MASK; + + done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) & + HREE_SQE_DONE_MASK; + + if (err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE) + return 0; + + return -EINVAL; +} + +static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen) +{ + if (!ctx || 
!qp || qlen < 0) + return -EINVAL; + + spin_lock_init(&ctx->req_lock); + ctx->qp = qp; + + ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL); + if (!ctx->req_list) + return -ENOMEM; + ctx->key_sz = 0; + ctx->crt_g2_mode = false; + idr_init(&ctx->req_idr); + + return 0; +} + +static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all) +{ + if (is_clear_all) { + idr_destroy(&ctx->req_idr); + kfree(ctx->req_list); + hisi_qm_release_qp(ctx->qp); + } + + ctx->crt_g2_mode = false; + ctx->key_sz = 0; +} + +static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp) +{ + struct hpre_asym_request *req; + struct kpp_request *areq; + int ret; + + ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req); + areq = req->areq.dh; + areq->dst_len = ctx->key_sz; + hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src); + kpp_request_complete(areq, ret); +} + +static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp) +{ + struct hpre_asym_request *req; + struct akcipher_request *areq; + int ret; + + ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req); + areq = req->areq.rsa; + areq->dst_len = ctx->key_sz; + hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src); + akcipher_request_complete(areq, ret); +} + +static void hpre_alg_cb(struct hisi_qp *qp, void *resp) +{ + struct hpre_ctx *ctx = qp->qp_ctx; + struct hpre_sqe *sqe = resp; + + ctx->req_list[sqe->tag]->cb(ctx, resp); +} + +static int hpre_ctx_init(struct hpre_ctx *ctx) +{ + struct hisi_qp *qp; + + qp = hpre_get_qp_and_start(); + if (IS_ERR(qp)) + return PTR_ERR(qp); + + qp->qp_ctx = ctx; + qp->req_cb = hpre_alg_cb; + + return hpre_ctx_set(ctx, qp, QM_Q_DEPTH); +} + +static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa) +{ + struct hpre_asym_request *h_req; + struct hpre_sqe *msg; + int req_id; + void *tmp; + + if (is_rsa) { + struct akcipher_request *akreq = req; + + if (akreq->dst_len < ctx->key_sz) { + akreq->dst_len = ctx->key_sz; + return -EOVERFLOW; + } + + tmp = akcipher_request_ctx(akreq); + h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); + h_req->cb = hpre_rsa_cb; + h_req->areq.rsa = akreq; + msg = &h_req->req; + memset(msg, 0, sizeof(*msg)); + } else { + struct kpp_request *kreq = req; + + if (kreq->dst_len < ctx->key_sz) { + kreq->dst_len = ctx->key_sz; + return -EOVERFLOW; + } + + tmp = kpp_request_ctx(kreq); + h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); + h_req->cb = hpre_dh_cb; + h_req->areq.dh = kreq; + msg = &h_req->req; + memset(msg, 0, sizeof(*msg)); + msg->key = cpu_to_le64((u64)ctx->dh.dma_xa_p); + } + + msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT); + msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1; + h_req->ctx = ctx; + + req_id = hpre_add_req_to_ctx(h_req); + if (req_id < 0) + return -EBUSY; + + msg->tag = cpu_to_le16((u16)req_id); + + return 0; +} + +#ifdef CONFIG_CRYPTO_DH +static int hpre_dh_compute_value(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + void *tmp = kpp_request_ctx(req); + struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); + struct hpre_sqe *msg = &hpre_req->req; + int ctr = 0; + int ret; + + if (!ctx) + return -EINVAL; + + ret = hpre_msg_request_set(ctx, req, false); + if (ret) + return ret; + + if (req->src) { + ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1); + if (ret) + goto clear_all; + } + + ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1); + if (ret) + goto clear_all; + + if (ctx->crt_g2_mode && !req->src) + msg->dw0 |= HPRE_ALG_DH_G2; + else + 
msg->dw0 |= HPRE_ALG_DH; + do { + ret = hisi_qp_send(ctx->qp, msg); + } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES); + + /* success */ + if (!ret) + return -EINPROGRESS; + +clear_all: + hpre_rm_req_from_ctx(hpre_req); + hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src); + + return ret; +} + +static int hpre_is_dh_params_length_valid(unsigned int key_sz) +{ +#define _HPRE_DH_GRP1 768 +#define _HPRE_DH_GRP2 1024 +#define _HPRE_DH_GRP5 1536 +#define _HPRE_DH_GRP14 2048 +#define _HPRE_DH_GRP15 3072 +#define _HPRE_DH_GRP16 4096 + switch (key_sz) { + case _HPRE_DH_GRP1: + case _HPRE_DH_GRP2: + case _HPRE_DH_GRP5: + case _HPRE_DH_GRP14: + case _HPRE_DH_GRP15: + case _HPRE_DH_GRP16: + return 0; + } + + return -EINVAL; +} + +static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params) +{ + struct device *dev = HPRE_DEV(ctx); + unsigned int sz; + + if (params->p_size > HPRE_DH_MAX_P_SZ) + return -EINVAL; + + if (hpre_is_dh_params_length_valid(params->p_size << + HPRE_BITS_2_BYTES_SHIFT)) + return -EINVAL; + + sz = ctx->key_sz = params->p_size; + ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1, + &ctx->dh.dma_xa_p, GFP_KERNEL); + if (!ctx->dh.xa_p) + return -ENOMEM; + + memcpy(ctx->dh.xa_p + sz, params->p, sz); + + /* If g equals 2 don't copy it */ + if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) { + ctx->crt_g2_mode = true; + return 0; + } + + ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL); + if (!ctx->dh.g) { + dma_free_coherent(dev, sz << 1, ctx->dh.xa_p, + ctx->dh.dma_xa_p); + ctx->dh.xa_p = NULL; + return -ENOMEM; + } + + memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size); + + return 0; +} + +static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all) +{ + struct device *dev = HPRE_DEV(ctx); + unsigned int sz = ctx->key_sz; + + if (is_clear_all) + hisi_qm_stop_qp(ctx->qp); + + if (ctx->dh.g) { + memset(ctx->dh.g, 0, sz); + dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g); + ctx->dh.g = NULL; + } + + if (ctx->dh.xa_p) { + memset(ctx->dh.xa_p, 0, sz); + dma_free_coherent(dev, sz << 1, ctx->dh.xa_p, + ctx->dh.dma_xa_p); + ctx->dh.xa_p = NULL; + } + + hpre_ctx_clear(ctx, is_clear_all); +} + +static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf, + unsigned int len) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + struct dh params; + int ret; + + if (crypto_dh_decode_key(buf, len, ¶ms) < 0) + return -EINVAL; + + /* Free old secret if any */ + hpre_dh_clear_ctx(ctx, false); + + ret = hpre_dh_set_params(ctx, ¶ms); + if (ret < 0) + goto err_clear_ctx; + + memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key, + params.key_size); + + return 0; + +err_clear_ctx: + hpre_dh_clear_ctx(ctx, false); + return ret; +} + +static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + + return ctx->key_sz; +} + +static int hpre_dh_init_tfm(struct crypto_kpp *tfm) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + + return hpre_ctx_init(ctx); +} + +static void hpre_dh_exit_tfm(struct crypto_kpp *tfm) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + + hpre_dh_clear_ctx(ctx, true); +} +#endif + +static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len) +{ + while (!**ptr && *len) { + (*ptr)++; + (*len)--; + } +} + +static bool hpre_rsa_key_size_is_support(unsigned int len) +{ + unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT; + +#define _RSA_1024BITS_KEY_WDTH 1024 +#define _RSA_2048BITS_KEY_WDTH 2048 +#define _RSA_3072BITS_KEY_WDTH 3072 
+#define _RSA_4096BITS_KEY_WDTH 4096 + + switch (bits) { + case _RSA_1024BITS_KEY_WDTH: + case _RSA_2048BITS_KEY_WDTH: + case _RSA_3072BITS_KEY_WDTH: + case _RSA_4096BITS_KEY_WDTH: + return true; + default: + return false; + } +} + +static int hpre_rsa_enc(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); + void *tmp = akcipher_request_ctx(req); + struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); + struct hpre_sqe *msg = &hpre_req->req; + int ctr = 0; + int ret; + + if (!ctx) + return -EINVAL; + + /* For 512 and 1536 bits key size, use soft tfm instead */ + if (ctx->key_sz == HPRE_RSA_512BITS_KSZ || + ctx->key_sz == HPRE_RSA_1536BITS_KSZ) { + akcipher_request_set_tfm(req, ctx->rsa.soft_tfm); + ret = crypto_akcipher_encrypt(req); + akcipher_request_set_tfm(req, tfm); + return ret; + } + + if (!ctx->rsa.pubkey) + return -EINVAL; + + ret = hpre_msg_request_set(ctx, req, true); + if (ret) + return ret; + + msg->dw0 |= HPRE_ALG_NC_NCRT; + msg->key = cpu_to_le64((u64)ctx->rsa.dma_pubkey); + + ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0); + if (ret) + goto clear_all; + + ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0); + if (ret) + goto clear_all; + + do { + ret = hisi_qp_send(ctx->qp, msg); + } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES); + + /* success */ + if (!ret) + return -EINPROGRESS; + +clear_all: + hpre_rm_req_from_ctx(hpre_req); + hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src); + + return ret; +} + +static int hpre_rsa_dec(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); + void *tmp = akcipher_request_ctx(req); + struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); + struct hpre_sqe *msg = &hpre_req->req; + int ctr = 0; + int ret; + + if (!ctx) + return -EINVAL; + + /* For 512 and 1536 bits key size, use soft tfm instead */ + if (ctx->key_sz == HPRE_RSA_512BITS_KSZ || + ctx->key_sz == HPRE_RSA_1536BITS_KSZ) { + akcipher_request_set_tfm(req, ctx->rsa.soft_tfm); + ret = crypto_akcipher_decrypt(req); + akcipher_request_set_tfm(req, tfm); + return ret; + } + + if (!ctx->rsa.prikey) + return -EINVAL; + + ret = hpre_msg_request_set(ctx, req, true); + if (ret) + return ret; + + if (ctx->crt_g2_mode) { + msg->key = cpu_to_le64((u64)ctx->rsa.dma_crt_prikey); + msg->dw0 |= HPRE_ALG_NC_CRT; + } else { + msg->key = cpu_to_le64((u64)ctx->rsa.dma_prikey); + msg->dw0 |= HPRE_ALG_NC_NCRT; + } + + ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0); + if (ret) + goto clear_all; + + ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0); + if (ret) + goto clear_all; + + do { + ret = hisi_qp_send(ctx->qp, msg); + } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES); + + /* success */ + if (!ret) + return -EINPROGRESS; + +clear_all: + hpre_rm_req_from_ctx(hpre_req); + hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src); + + return ret; +} + +static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value, + size_t vlen, bool private) +{ + const char *ptr = value; + + hpre_rsa_drop_leading_zeros(&ptr, &vlen); + + ctx->key_sz = vlen; + + /* if invalid key size provided, we use software tfm */ + if (!hpre_rsa_key_size_is_support(ctx->key_sz)) + return 0; + + ctx->rsa.pubkey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1, + &ctx->rsa.dma_pubkey, + GFP_KERNEL); + if (!ctx->rsa.pubkey) + return -ENOMEM; + + if 
(private) { + ctx->rsa.prikey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1, + &ctx->rsa.dma_prikey, + GFP_KERNEL); + if (!ctx->rsa.prikey) { + dma_free_coherent(HPRE_DEV(ctx), vlen << 1, + ctx->rsa.pubkey, + ctx->rsa.dma_pubkey); + ctx->rsa.pubkey = NULL; + return -ENOMEM; + } + memcpy(ctx->rsa.prikey + vlen, ptr, vlen); + } + memcpy(ctx->rsa.pubkey + vlen, ptr, vlen); + + /* Using hardware HPRE to do RSA */ + return 1; +} + +static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value, + size_t vlen) +{ + const char *ptr = value; + + hpre_rsa_drop_leading_zeros(&ptr, &vlen); + + if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) { + ctx->rsa.pubkey = NULL; + return -EINVAL; + } + + memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen); + + return 0; +} + +static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value, + size_t vlen) +{ + const char *ptr = value; + + hpre_rsa_drop_leading_zeros(&ptr, &vlen); + + if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) + return -EINVAL; + + memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen); + + return 0; +} + +static int hpre_crt_para_get(char *para, const char *raw, + unsigned int raw_sz, unsigned int para_size) +{ + const char *ptr = raw; + size_t len = raw_sz; + + hpre_rsa_drop_leading_zeros(&ptr, &len); + if (!len || len > para_size) + return -EINVAL; + + memcpy(para + para_size - len, ptr, len); + + return 0; +} + +static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key) +{ + unsigned int hlf_ksz = ctx->key_sz >> 1; + struct device *dev = HPRE_DEV(ctx); + u64 offset; + int ret; + + ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, + &ctx->rsa.dma_crt_prikey, + GFP_KERNEL); + if (!ctx->rsa.crt_prikey) + return -ENOMEM; + + ret = hpre_crt_para_get(ctx->rsa.crt_prikey, rsa_key->dq, + rsa_key->dq_sz, hlf_ksz); + if (ret) + goto free_key; + + offset = hlf_ksz; + ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, rsa_key->dp, + rsa_key->dp_sz, hlf_ksz); + if (ret) + goto free_key; + + offset = hlf_ksz * HPRE_CRT_Q; + ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, + rsa_key->q, rsa_key->q_sz, hlf_ksz); + if (ret) + goto free_key; + + offset = hlf_ksz * HPRE_CRT_P; + ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, + rsa_key->p, rsa_key->p_sz, hlf_ksz); + if (ret) + goto free_key; + + offset = hlf_ksz * HPRE_CRT_INV; + ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, + rsa_key->qinv, rsa_key->qinv_sz, hlf_ksz); + if (ret) + goto free_key; + + ctx->crt_g2_mode = true; + + return 0; + +free_key: + offset = hlf_ksz * HPRE_CRT_PRMS; + memset(ctx->rsa.crt_prikey, 0, offset); + dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey, + ctx->rsa.dma_crt_prikey); + ctx->rsa.crt_prikey = NULL; + ctx->crt_g2_mode = false; + + return ret; +} + +/* If it is clear all, all the resources of the QP will be cleaned. 
*/ +static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all) +{ + unsigned int half_key_sz = ctx->key_sz >> 1; + struct device *dev = HPRE_DEV(ctx); + + if (is_clear_all) + hisi_qm_stop_qp(ctx->qp); + + if (ctx->rsa.pubkey) { + dma_free_coherent(dev, ctx->key_sz << 1, + ctx->rsa.pubkey, ctx->rsa.dma_pubkey); + ctx->rsa.pubkey = NULL; + } + + if (ctx->rsa.crt_prikey) { + memset(ctx->rsa.crt_prikey, 0, half_key_sz * HPRE_CRT_PRMS); + dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS, + ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey); + ctx->rsa.crt_prikey = NULL; + } + + if (ctx->rsa.prikey) { + memset(ctx->rsa.prikey, 0, ctx->key_sz); + dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey, + ctx->rsa.dma_prikey); + ctx->rsa.prikey = NULL; + } + + hpre_ctx_clear(ctx, is_clear_all); +} + +/* + * we should judge if it is CRT or not, + * CRT: return true, N-CRT: return false . + */ +static bool hpre_is_crt_key(struct rsa_key *key) +{ + u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz + + key->qinv_sz; + +#define LEN_OF_NCRT_PARA 5 + + /* N-CRT less than 5 parameters */ + return len > LEN_OF_NCRT_PARA; +} + +static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key, + unsigned int keylen, bool private) +{ + struct rsa_key rsa_key; + int ret; + + hpre_rsa_clear_ctx(ctx, false); + + if (private) + ret = rsa_parse_priv_key(&rsa_key, key, keylen); + else + ret = rsa_parse_pub_key(&rsa_key, key, keylen); + if (ret < 0) + return ret; + + ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private); + if (ret <= 0) + return ret; + + if (private) { + ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz); + if (ret < 0) + goto free; + + if (hpre_is_crt_key(&rsa_key)) { + ret = hpre_rsa_setkey_crt(ctx, &rsa_key); + if (ret < 0) + goto free; + } + } + + ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz); + if (ret < 0) + goto free; + + if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) { + ret = -EINVAL; + goto free; + } + + return 0; + +free: + hpre_rsa_clear_ctx(ctx, false); + return ret; +} + +static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen) +{ + struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); + int ret; + + ret = crypto_akcipher_set_pub_key(ctx->rsa.soft_tfm, key, keylen); + if (ret) + return ret; + + return hpre_rsa_setkey(ctx, key, keylen, false); +} + +static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen) +{ + struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); + int ret; + + ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen); + if (ret) + return ret; + + return hpre_rsa_setkey(ctx, key, keylen, true); +} + +static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm) +{ + struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); + + /* For 512 and 1536 bits key size, use soft tfm instead */ + if (ctx->key_sz == HPRE_RSA_512BITS_KSZ || + ctx->key_sz == HPRE_RSA_1536BITS_KSZ) + return crypto_akcipher_maxsize(ctx->rsa.soft_tfm); + + return ctx->key_sz; +} + +static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm) +{ + struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); + + ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0); + if (IS_ERR(ctx->rsa.soft_tfm)) { + pr_err("Can not alloc_akcipher!\n"); + return PTR_ERR(ctx->rsa.soft_tfm); + } + + return hpre_ctx_init(ctx); +} + +static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm) +{ + struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); + + hpre_rsa_clear_ctx(ctx, true); + crypto_free_akcipher(ctx->rsa.soft_tfm); +} + 
+static struct akcipher_alg rsa = { + .sign = hpre_rsa_dec, + .verify = hpre_rsa_enc, + .encrypt = hpre_rsa_enc, + .decrypt = hpre_rsa_dec, + .set_pub_key = hpre_rsa_setpubkey, + .set_priv_key = hpre_rsa_setprivkey, + .max_size = hpre_rsa_max_size, + .init = hpre_rsa_init_tfm, + .exit = hpre_rsa_exit_tfm, + .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, + .base = { + .cra_ctxsize = sizeof(struct hpre_ctx), + .cra_priority = HPRE_CRYPTO_ALG_PRI, + .cra_name = "rsa", + .cra_driver_name = "hpre-rsa", + .cra_module = THIS_MODULE, + }, +}; + +#ifdef CONFIG_CRYPTO_DH +static struct kpp_alg dh = { + .set_secret = hpre_dh_set_secret, + .generate_public_key = hpre_dh_compute_value, + .compute_shared_secret = hpre_dh_compute_value, + .max_size = hpre_dh_max_size, + .init = hpre_dh_init_tfm, + .exit = hpre_dh_exit_tfm, + .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, + .base = { + .cra_ctxsize = sizeof(struct hpre_ctx), + .cra_priority = HPRE_CRYPTO_ALG_PRI, + .cra_name = "dh", + .cra_driver_name = "hpre-dh", + .cra_module = THIS_MODULE, + }, +}; +#endif + +int hpre_algs_register(void) +{ + int ret = 0; + + mutex_lock(&hpre_alg_lock); + if (++hpre_active_devs == 1) { + rsa.base.cra_flags = 0; + ret = crypto_register_akcipher(&rsa); + if (ret) + goto unlock; +#ifdef CONFIG_CRYPTO_DH + ret = crypto_register_kpp(&dh); + if (ret) { + crypto_unregister_akcipher(&rsa); + goto unlock; + } +#endif + } + +unlock: + mutex_unlock(&hpre_alg_lock); + return ret; +} + +void hpre_algs_unregister(void) +{ + mutex_lock(&hpre_alg_lock); + if (--hpre_active_devs == 0) { + crypto_unregister_akcipher(&rsa); +#ifdef CONFIG_CRYPTO_DH + crypto_unregister_kpp(&dh); +#endif + } + mutex_unlock(&hpre_alg_lock); +} diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c new file mode 100644 index 000000000000..34e0424410bf --- /dev/null +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -0,0 +1,1052 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018-2019 HiSilicon Limited. 
*/ +#include <linux/acpi.h> +#include <linux/aer.h> +#include <linux/bitops.h> +#include <linux/debugfs.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/topology.h> +#include "hpre.h" + +#define HPRE_VF_NUM 63 +#define HPRE_QUEUE_NUM_V2 1024 +#define HPRE_QM_ABNML_INT_MASK 0x100004 +#define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0) +#define HPRE_COMM_CNT_CLR_CE 0x0 +#define HPRE_CTRL_CNT_CLR_CE 0x301000 +#define HPRE_FSM_MAX_CNT 0x301008 +#define HPRE_VFG_AXQOS 0x30100c +#define HPRE_VFG_AXCACHE 0x301010 +#define HPRE_RDCHN_INI_CFG 0x301014 +#define HPRE_AWUSR_FP_CFG 0x301018 +#define HPRE_BD_ENDIAN 0x301020 +#define HPRE_ECC_BYPASS 0x301024 +#define HPRE_RAS_WIDTH_CFG 0x301028 +#define HPRE_POISON_BYPASS 0x30102c +#define HPRE_BD_ARUSR_CFG 0x301030 +#define HPRE_BD_AWUSR_CFG 0x301034 +#define HPRE_TYPES_ENB 0x301038 +#define HPRE_DATA_RUSER_CFG 0x30103c +#define HPRE_DATA_WUSER_CFG 0x301040 +#define HPRE_INT_MASK 0x301400 +#define HPRE_INT_STATUS 0x301800 +#define HPRE_CORE_INT_ENABLE 0 +#define HPRE_CORE_INT_DISABLE 0x003fffff +#define HPRE_RAS_ECC_1BIT_TH 0x30140c +#define HPRE_RDCHN_INI_ST 0x301a00 +#define HPRE_CLSTR_BASE 0x302000 +#define HPRE_CORE_EN_OFFSET 0x04 +#define HPRE_CORE_INI_CFG_OFFSET 0x20 +#define HPRE_CORE_INI_STATUS_OFFSET 0x80 +#define HPRE_CORE_HTBT_WARN_OFFSET 0x8c +#define HPRE_CORE_IS_SCHD_OFFSET 0x90 + +#define HPRE_RAS_CE_ENB 0x301410 +#define HPRE_HAC_RAS_CE_ENABLE 0x3f +#define HPRE_RAS_NFE_ENB 0x301414 +#define HPRE_HAC_RAS_NFE_ENABLE 0x3fffc0 +#define HPRE_RAS_FE_ENB 0x301418 +#define HPRE_HAC_RAS_FE_ENABLE 0 + +#define HPRE_CORE_ENB (HPRE_CLSTR_BASE + HPRE_CORE_EN_OFFSET) +#define HPRE_CORE_INI_CFG (HPRE_CLSTR_BASE + HPRE_CORE_INI_CFG_OFFSET) +#define HPRE_CORE_INI_STATUS (HPRE_CLSTR_BASE + HPRE_CORE_INI_STATUS_OFFSET) +#define HPRE_HAC_ECC1_CNT 0x301a04 +#define HPRE_HAC_ECC2_CNT 0x301a08 +#define HPRE_HAC_INT_STATUS 0x301800 +#define HPRE_HAC_SOURCE_INT 0x301600 +#define MASTER_GLOBAL_CTRL_SHUTDOWN 1 +#define MASTER_TRANS_RETURN_RW 3 +#define HPRE_MASTER_TRANS_RETURN 0x300150 +#define HPRE_MASTER_GLOBAL_CTRL 0x300000 +#define HPRE_CLSTR_ADDR_INTRVL 0x1000 +#define HPRE_CLUSTER_INQURY 0x100 +#define HPRE_CLSTR_ADDR_INQRY_RSLT 0x104 +#define HPRE_TIMEOUT_ABNML_BIT 6 +#define HPRE_PASID_EN_BIT 9 +#define HPRE_REG_RD_INTVRL_US 10 +#define HPRE_REG_RD_TMOUT_US 1000 +#define HPRE_DBGFS_VAL_MAX_LEN 20 +#define HPRE_PCI_DEVICE_ID 0xa258 +#define HPRE_PCI_VF_DEVICE_ID 0xa259 +#define HPRE_ADDR(qm, offset) (qm->io_base + (offset)) +#define HPRE_QM_USR_CFG_MASK 0xfffffffe +#define HPRE_QM_AXI_CFG_MASK 0xffff +#define HPRE_QM_VFG_AX_MASK 0xff +#define HPRE_BD_USR_MASK 0x3 +#define HPRE_CLUSTER_CORE_MASK 0xf + +#define HPRE_VIA_MSI_DSM 1 + +static LIST_HEAD(hpre_list); +static DEFINE_MUTEX(hpre_list_lock); +static const char hpre_name[] = "hisi_hpre"; +static struct dentry *hpre_debugfs_root; +static const struct pci_device_id hpre_dev_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_VF_DEVICE_ID) }, + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, hpre_dev_ids); + +struct hpre_hw_error { + u32 int_msk; + const char *msg; +}; + +static const char * const hpre_debug_file_name[] = { + [HPRE_CURRENT_QM] = "current_qm", + [HPRE_CLEAR_ENABLE] = "rdclr_en", + [HPRE_CLUSTER_CTRL] = "cluster_ctrl", +}; + +static const struct hpre_hw_error hpre_hw_errors[] = { + { .int_msk = BIT(0), .msg = "hpre_ecc_1bitt_err" }, + { .int_msk = 
BIT(1), .msg = "hpre_ecc_2bit_err" }, + { .int_msk = BIT(2), .msg = "hpre_data_wr_err" }, + { .int_msk = BIT(3), .msg = "hpre_data_rd_err" }, + { .int_msk = BIT(4), .msg = "hpre_bd_rd_err" }, + { .int_msk = BIT(5), .msg = "hpre_ooo_2bit_ecc_err" }, + { .int_msk = BIT(6), .msg = "hpre_cltr1_htbt_tm_out_err" }, + { .int_msk = BIT(7), .msg = "hpre_cltr2_htbt_tm_out_err" }, + { .int_msk = BIT(8), .msg = "hpre_cltr3_htbt_tm_out_err" }, + { .int_msk = BIT(9), .msg = "hpre_cltr4_htbt_tm_out_err" }, + { .int_msk = GENMASK(15, 10), .msg = "hpre_ooo_rdrsp_err" }, + { .int_msk = GENMASK(21, 16), .msg = "hpre_ooo_wrrsp_err" }, + { /* sentinel */ } +}; + +static const u64 hpre_cluster_offsets[] = { + [HPRE_CLUSTER0] = + HPRE_CLSTR_BASE + HPRE_CLUSTER0 * HPRE_CLSTR_ADDR_INTRVL, + [HPRE_CLUSTER1] = + HPRE_CLSTR_BASE + HPRE_CLUSTER1 * HPRE_CLSTR_ADDR_INTRVL, + [HPRE_CLUSTER2] = + HPRE_CLSTR_BASE + HPRE_CLUSTER2 * HPRE_CLSTR_ADDR_INTRVL, + [HPRE_CLUSTER3] = + HPRE_CLSTR_BASE + HPRE_CLUSTER3 * HPRE_CLSTR_ADDR_INTRVL, +}; + +static struct debugfs_reg32 hpre_cluster_dfx_regs[] = { + {"CORES_EN_STATUS ", HPRE_CORE_EN_OFFSET}, + {"CORES_INI_CFG ", HPRE_CORE_INI_CFG_OFFSET}, + {"CORES_INI_STATUS ", HPRE_CORE_INI_STATUS_OFFSET}, + {"CORES_HTBT_WARN ", HPRE_CORE_HTBT_WARN_OFFSET}, + {"CORES_IS_SCHD ", HPRE_CORE_IS_SCHD_OFFSET}, +}; + +static struct debugfs_reg32 hpre_com_dfx_regs[] = { + {"READ_CLR_EN ", HPRE_CTRL_CNT_CLR_CE}, + {"AXQOS ", HPRE_VFG_AXQOS}, + {"AWUSR_CFG ", HPRE_AWUSR_FP_CFG}, + {"QM_ARUSR_MCFG1 ", QM_ARUSER_M_CFG_1}, + {"QM_AWUSR_MCFG1 ", QM_AWUSER_M_CFG_1}, + {"BD_ENDIAN ", HPRE_BD_ENDIAN}, + {"ECC_CHECK_CTRL ", HPRE_ECC_BYPASS}, + {"RAS_INT_WIDTH ", HPRE_RAS_WIDTH_CFG}, + {"POISON_BYPASS ", HPRE_POISON_BYPASS}, + {"BD_ARUSER ", HPRE_BD_ARUSR_CFG}, + {"BD_AWUSER ", HPRE_BD_AWUSR_CFG}, + {"DATA_ARUSER ", HPRE_DATA_RUSER_CFG}, + {"DATA_AWUSER ", HPRE_DATA_WUSER_CFG}, + {"INT_STATUS ", HPRE_INT_STATUS}, +}; + +static int hpre_pf_q_num_set(const char *val, const struct kernel_param *kp) +{ + struct pci_dev *pdev; + u32 n, q_num; + u8 rev_id; + int ret; + + if (!val) + return -EINVAL; + + pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_DEVICE_ID, NULL); + if (!pdev) { + q_num = HPRE_QUEUE_NUM_V2; + pr_info("No device found currently, suppose queue number is %d\n", + q_num); + } else { + rev_id = pdev->revision; + if (rev_id != QM_HW_V2) + return -EINVAL; + + q_num = HPRE_QUEUE_NUM_V2; + } + + ret = kstrtou32(val, 10, &n); + if (ret != 0 || n == 0 || n > q_num) + return -EINVAL; + + return param_set_int(val, kp); +} + +static const struct kernel_param_ops hpre_pf_q_num_ops = { + .set = hpre_pf_q_num_set, + .get = param_get_int, +}; + +static u32 hpre_pf_q_num = HPRE_PF_DEF_Q_NUM; +module_param_cb(hpre_pf_q_num, &hpre_pf_q_num_ops, &hpre_pf_q_num, 0444); +MODULE_PARM_DESC(hpre_pf_q_num, "Number of queues in PF of CS(1-1024)"); + +static inline void hpre_add_to_list(struct hpre *hpre) +{ + mutex_lock(&hpre_list_lock); + list_add_tail(&hpre->list, &hpre_list); + mutex_unlock(&hpre_list_lock); +} + +static inline void hpre_remove_from_list(struct hpre *hpre) +{ + mutex_lock(&hpre_list_lock); + list_del(&hpre->list); + mutex_unlock(&hpre_list_lock); +} + +struct hpre *hpre_find_device(int node) +{ + struct hpre *hpre, *ret = NULL; + int min_distance = INT_MAX; + struct device *dev; + int dev_node = 0; + + mutex_lock(&hpre_list_lock); + list_for_each_entry(hpre, &hpre_list, list) { + dev = &hpre->qm.pdev->dev; +#ifdef CONFIG_NUMA + dev_node = dev->numa_node; + if (dev_node < 0) + dev_node = 0; +#endif + 
if (node_distance(dev_node, node) < min_distance) { + ret = hpre; + min_distance = node_distance(dev_node, node); + } + } + mutex_unlock(&hpre_list_lock); + + return ret; +} + +static int hpre_cfg_by_dsm(struct hisi_qm *qm) +{ + struct device *dev = &qm->pdev->dev; + union acpi_object *obj; + guid_t guid; + + if (guid_parse("b06b81ab-0134-4a45-9b0c-483447b95fa7", &guid)) { + dev_err(dev, "Hpre GUID failed\n"); + return -EINVAL; + } + + /* Switch over to MSI handling due to non-standard PCI implementation */ + obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, + 0, HPRE_VIA_MSI_DSM, NULL); + if (!obj) { + dev_err(dev, "ACPI handle failed!\n"); + return -EIO; + } + + ACPI_FREE(obj); + + return 0; +} + +static int hpre_set_user_domain_and_cache(struct hpre *hpre) +{ + struct hisi_qm *qm = &hpre->qm; + struct device *dev = &qm->pdev->dev; + unsigned long offset; + int ret, i; + u32 val; + + writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_ARUSER_M_CFG_ENABLE)); + writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_AWUSER_M_CFG_ENABLE)); + writel_relaxed(HPRE_QM_AXI_CFG_MASK, HPRE_ADDR(qm, QM_AXI_M_CFG)); + + /* disable FLR triggered by BME(bus master enable) */ + writel(PEH_AXUSER_CFG, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG)); + writel(PEH_AXUSER_CFG_ENABLE, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE)); + + /* HPRE need more time, we close this interrupt */ + val = readl_relaxed(HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK)); + val |= BIT(HPRE_TIMEOUT_ABNML_BIT); + writel_relaxed(val, HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK)); + + writel(0x1, HPRE_ADDR(qm, HPRE_TYPES_ENB)); + writel(HPRE_QM_VFG_AX_MASK, HPRE_ADDR(qm, HPRE_VFG_AXCACHE)); + writel(0x0, HPRE_ADDR(qm, HPRE_BD_ENDIAN)); + writel(0x0, HPRE_ADDR(qm, HPRE_INT_MASK)); + writel(0x0, HPRE_ADDR(qm, HPRE_RAS_ECC_1BIT_TH)); + writel(0x0, HPRE_ADDR(qm, HPRE_POISON_BYPASS)); + writel(0x0, HPRE_ADDR(qm, HPRE_COMM_CNT_CLR_CE)); + writel(0x0, HPRE_ADDR(qm, HPRE_ECC_BYPASS)); + + writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_ARUSR_CFG)); + writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_AWUSR_CFG)); + writel(0x1, HPRE_ADDR(qm, HPRE_RDCHN_INI_CFG)); + ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, HPRE_RDCHN_INI_ST), val, + val & BIT(0), + HPRE_REG_RD_INTVRL_US, + HPRE_REG_RD_TMOUT_US); + if (ret) { + dev_err(dev, "read rd channel timeout fail!\n"); + return -ETIMEDOUT; + } + + for (i = 0; i < HPRE_CLUSTERS_NUM; i++) { + offset = i * HPRE_CLSTR_ADDR_INTRVL; + + /* clusters initiating */ + writel(HPRE_CLUSTER_CORE_MASK, + HPRE_ADDR(qm, offset + HPRE_CORE_ENB)); + writel(0x1, HPRE_ADDR(qm, offset + HPRE_CORE_INI_CFG)); + ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, offset + + HPRE_CORE_INI_STATUS), val, + ((val & HPRE_CLUSTER_CORE_MASK) == + HPRE_CLUSTER_CORE_MASK), + HPRE_REG_RD_INTVRL_US, + HPRE_REG_RD_TMOUT_US); + if (ret) { + dev_err(dev, + "cluster %d int st status timeout!\n", i); + return -ETIMEDOUT; + } + } + + ret = hpre_cfg_by_dsm(qm); + if (ret) + dev_err(dev, "acpi_evaluate_dsm err.\n"); + + return ret; +} + +static void hpre_cnt_regs_clear(struct hisi_qm *qm) +{ + unsigned long offset; + int i; + + /* clear current_qm */ + writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF); + writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF); + + /* clear clusterX/cluster_ctrl */ + for (i = 0; i < HPRE_CLUSTERS_NUM; i++) { + offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL; + writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY); + } + + /* clear rdclr_en */ + writel(0x0, qm->io_base + HPRE_CTRL_CNT_CLR_CE); + + hisi_qm_debug_regs_clear(qm); +} + +static void hpre_hw_error_disable(struct 
hpre *hpre) +{ + struct hisi_qm *qm = &hpre->qm; + + /* disable hpre hw error interrupts */ + writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK); +} + +static void hpre_hw_error_enable(struct hpre *hpre) +{ + struct hisi_qm *qm = &hpre->qm; + + /* enable hpre hw error interrupts */ + writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK); + writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB); + writel(HPRE_HAC_RAS_NFE_ENABLE, qm->io_base + HPRE_RAS_NFE_ENB); + writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB); +} + +static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file) +{ + struct hpre *hpre = container_of(file->debug, struct hpre, debug); + + return &hpre->qm; +} + +static u32 hpre_current_qm_read(struct hpre_debugfs_file *file) +{ + struct hisi_qm *qm = hpre_file_to_qm(file); + + return readl(qm->io_base + QM_DFX_MB_CNT_VF); +} + +static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val) +{ + struct hisi_qm *qm = hpre_file_to_qm(file); + struct hpre_debug *debug = file->debug; + struct hpre *hpre = container_of(debug, struct hpre, debug); + u32 num_vfs = hpre->num_vfs; + u32 vfq_num, tmp; + + + if (val > num_vfs) + return -EINVAL; + + /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */ + if (val == 0) { + qm->debug.curr_qm_qp_num = qm->qp_num; + } else { + vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs; + if (val == num_vfs) { + qm->debug.curr_qm_qp_num = + qm->ctrl_qp_num - qm->qp_num - (num_vfs - 1) * vfq_num; + } else { + qm->debug.curr_qm_qp_num = vfq_num; + } + } + + writel(val, qm->io_base + QM_DFX_MB_CNT_VF); + writel(val, qm->io_base + QM_DFX_DB_CNT_VF); + + tmp = val | + (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK); + writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); + + tmp = val | + (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK); + writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); + + return 0; +} + +static u32 hpre_clear_enable_read(struct hpre_debugfs_file *file) +{ + struct hisi_qm *qm = hpre_file_to_qm(file); + + return readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) & + HPRE_CTRL_CNT_CLR_CE_BIT; +} + +static int hpre_clear_enable_write(struct hpre_debugfs_file *file, u32 val) +{ + struct hisi_qm *qm = hpre_file_to_qm(file); + u32 tmp; + + if (val != 1 && val != 0) + return -EINVAL; + + tmp = (readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) & + ~HPRE_CTRL_CNT_CLR_CE_BIT) | val; + writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE); + + return 0; +} + +static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file) +{ + struct hisi_qm *qm = hpre_file_to_qm(file); + int cluster_index = file->index - HPRE_CLUSTER_CTRL; + unsigned long offset = HPRE_CLSTR_BASE + + cluster_index * HPRE_CLSTR_ADDR_INTRVL; + + return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT); +} + +static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val) +{ + struct hisi_qm *qm = hpre_file_to_qm(file); + int cluster_index = file->index - HPRE_CLUSTER_CTRL; + unsigned long offset = HPRE_CLSTR_BASE + cluster_index * + HPRE_CLSTR_ADDR_INTRVL; + + writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY); + + return 0; +} + +static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf, + size_t count, loff_t *pos) +{ + struct hpre_debugfs_file *file = filp->private_data; + char tbuf[HPRE_DBGFS_VAL_MAX_LEN]; + u32 val; + int ret; + + spin_lock_irq(&file->lock); + switch (file->type) { + case HPRE_CURRENT_QM: + val = hpre_current_qm_read(file); + break; + case 
HPRE_CLEAR_ENABLE: + val = hpre_clear_enable_read(file); + break; + case HPRE_CLUSTER_CTRL: + val = hpre_cluster_inqry_read(file); + break; + default: + spin_unlock_irq(&file->lock); + return -EINVAL; + } + spin_unlock_irq(&file->lock); + ret = sprintf(tbuf, "%u\n", val); + return simple_read_from_buffer(buf, count, pos, tbuf, ret); +} + +static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct hpre_debugfs_file *file = filp->private_data; + char tbuf[HPRE_DBGFS_VAL_MAX_LEN]; + unsigned long val; + int len, ret; + + if (*pos != 0) + return 0; + + if (count >= HPRE_DBGFS_VAL_MAX_LEN) + return -ENOSPC; + + len = simple_write_to_buffer(tbuf, HPRE_DBGFS_VAL_MAX_LEN - 1, + pos, buf, count); + if (len < 0) + return len; + + tbuf[len] = '\0'; + if (kstrtoul(tbuf, 0, &val)) + return -EFAULT; + + spin_lock_irq(&file->lock); + switch (file->type) { + case HPRE_CURRENT_QM: + ret = hpre_current_qm_write(file, val); + if (ret) + goto err_input; + break; + case HPRE_CLEAR_ENABLE: + ret = hpre_clear_enable_write(file, val); + if (ret) + goto err_input; + break; + case HPRE_CLUSTER_CTRL: + ret = hpre_cluster_inqry_write(file, val); + if (ret) + goto err_input; + break; + default: + ret = -EINVAL; + goto err_input; + } + spin_unlock_irq(&file->lock); + + return count; + +err_input: + spin_unlock_irq(&file->lock); + return ret; +} + +static const struct file_operations hpre_ctrl_debug_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = hpre_ctrl_debug_read, + .write = hpre_ctrl_debug_write, +}; + +static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir, + enum hpre_ctrl_dbgfs_file type, int indx) +{ + struct dentry *tmp, *file_dir; + + if (dir) + file_dir = dir; + else + file_dir = dbg->debug_root; + + if (type >= HPRE_DEBUG_FILE_NUM) + return -EINVAL; + + spin_lock_init(&dbg->files[indx].lock); + dbg->files[indx].debug = dbg; + dbg->files[indx].type = type; + dbg->files[indx].index = indx; + tmp = debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir, + dbg->files + indx, &hpre_ctrl_debug_fops); + if (!tmp) + return -ENOENT; + + return 0; +} + +static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug) +{ + struct hpre *hpre = container_of(debug, struct hpre, debug); + struct hisi_qm *qm = &hpre->qm; + struct device *dev = &qm->pdev->dev; + struct debugfs_regset32 *regset; + struct dentry *tmp; + + regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); + if (!regset) + return -ENOMEM; + + regset->regs = hpre_com_dfx_regs; + regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs); + regset->base = qm->io_base; + + tmp = debugfs_create_regset32("regs", 0444, debug->debug_root, regset); + if (!tmp) + return -ENOENT; + + return 0; +} + +static int hpre_cluster_debugfs_init(struct hpre_debug *debug) +{ + struct hpre *hpre = container_of(debug, struct hpre, debug); + struct hisi_qm *qm = &hpre->qm; + struct device *dev = &qm->pdev->dev; + char buf[HPRE_DBGFS_VAL_MAX_LEN]; + struct debugfs_regset32 *regset; + struct dentry *tmp_d, *tmp; + int i, ret; + + for (i = 0; i < HPRE_CLUSTERS_NUM; i++) { + sprintf(buf, "cluster%d", i); + + tmp_d = debugfs_create_dir(buf, debug->debug_root); + if (!tmp_d) + return -ENOENT; + + regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); + if (!regset) + return -ENOMEM; + + regset->regs = hpre_cluster_dfx_regs; + regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs); + regset->base = qm->io_base + hpre_cluster_offsets[i]; + + tmp = debugfs_create_regset32("regs", 0444, 
tmp_d, regset); + if (!tmp) + return -ENOENT; + ret = hpre_create_debugfs_file(debug, tmp_d, HPRE_CLUSTER_CTRL, + i + HPRE_CLUSTER_CTRL); + if (ret) + return ret; + } + + return 0; +} + +static int hpre_ctrl_debug_init(struct hpre_debug *debug) +{ + int ret; + + ret = hpre_create_debugfs_file(debug, NULL, HPRE_CURRENT_QM, + HPRE_CURRENT_QM); + if (ret) + return ret; + + ret = hpre_create_debugfs_file(debug, NULL, HPRE_CLEAR_ENABLE, + HPRE_CLEAR_ENABLE); + if (ret) + return ret; + + ret = hpre_pf_comm_regs_debugfs_init(debug); + if (ret) + return ret; + + return hpre_cluster_debugfs_init(debug); +} + +static int hpre_debugfs_init(struct hpre *hpre) +{ + struct hisi_qm *qm = &hpre->qm; + struct device *dev = &qm->pdev->dev; + struct dentry *dir; + int ret; + + dir = debugfs_create_dir(dev_name(dev), hpre_debugfs_root); + if (!dir) + return -ENOENT; + + qm->debug.debug_root = dir; + + ret = hisi_qm_debug_init(qm); + if (ret) + goto failed_to_create; + + if (qm->pdev->device == HPRE_PCI_DEVICE_ID) { + hpre->debug.debug_root = dir; + ret = hpre_ctrl_debug_init(&hpre->debug); + if (ret) + goto failed_to_create; + } + return 0; + +failed_to_create: + debugfs_remove_recursive(qm->debug.debug_root); + return ret; +} + +static void hpre_debugfs_exit(struct hpre *hpre) +{ + struct hisi_qm *qm = &hpre->qm; + + debugfs_remove_recursive(qm->debug.debug_root); +} + +static int hpre_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev) +{ + enum qm_hw_ver rev_id; + + rev_id = hisi_qm_get_hw_version(pdev); + if (rev_id < 0) + return -ENODEV; + + if (rev_id == QM_HW_V1) { + pci_warn(pdev, "HPRE version 1 is not supported!\n"); + return -EINVAL; + } + + qm->pdev = pdev; + qm->ver = rev_id; + qm->sqe_size = HPRE_SQE_SIZE; + qm->dev_name = hpre_name; + qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ? 
+ QM_HW_PF : QM_HW_VF; + if (pdev->is_physfn) { + qm->qp_base = HPRE_PF_DEF_Q_BASE; + qm->qp_num = hpre_pf_q_num; + } + qm->use_dma_api = true; + + return 0; +} + +static void hpre_hw_err_init(struct hpre *hpre) +{ + hisi_qm_hw_error_init(&hpre->qm, QM_BASE_CE, QM_BASE_NFE, + 0, QM_DB_RANDOM_INVALID); + hpre_hw_error_enable(hpre); +} + +static int hpre_pf_probe_init(struct hpre *hpre) +{ + struct hisi_qm *qm = &hpre->qm; + int ret; + + qm->ctrl_qp_num = HPRE_QUEUE_NUM_V2; + + ret = hpre_set_user_domain_and_cache(hpre); + if (ret) + return ret; + + hpre_hw_err_init(hpre); + + return 0; +} + +static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct hisi_qm *qm; + struct hpre *hpre; + int ret; + + hpre = devm_kzalloc(&pdev->dev, sizeof(*hpre), GFP_KERNEL); + if (!hpre) + return -ENOMEM; + + pci_set_drvdata(pdev, hpre); + + qm = &hpre->qm; + ret = hpre_qm_pre_init(qm, pdev); + if (ret) + return ret; + + ret = hisi_qm_init(qm); + if (ret) + return ret; + + if (pdev->is_physfn) { + ret = hpre_pf_probe_init(hpre); + if (ret) + goto err_with_qm_init; + } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V2) { + /* v2 starts to support get vft by mailbox */ + ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); + if (ret) + goto err_with_qm_init; + } + + ret = hisi_qm_start(qm); + if (ret) + goto err_with_err_init; + + ret = hpre_debugfs_init(hpre); + if (ret) + dev_warn(&pdev->dev, "init debugfs fail!\n"); + + hpre_add_to_list(hpre); + + ret = hpre_algs_register(); + if (ret < 0) { + hpre_remove_from_list(hpre); + pci_err(pdev, "fail to register algs to crypto!\n"); + goto err_with_qm_start; + } + return 0; + +err_with_qm_start: + hisi_qm_stop(qm); + +err_with_err_init: + if (pdev->is_physfn) + hpre_hw_error_disable(hpre); + +err_with_qm_init: + hisi_qm_uninit(qm); + + return ret; +} + +static int hpre_vf_q_assign(struct hpre *hpre, int num_vfs) +{ + struct hisi_qm *qm = &hpre->qm; + u32 qp_num = qm->qp_num; + int q_num, remain_q_num, i; + u32 q_base = qp_num; + int ret; + + if (!num_vfs) + return -EINVAL; + + remain_q_num = qm->ctrl_qp_num - qp_num; + + /* If remaining queues are not enough, return error. */ + if (remain_q_num < num_vfs) + return -EINVAL; + + q_num = remain_q_num / num_vfs; + for (i = 1; i <= num_vfs; i++) { + if (i == num_vfs) + q_num += remain_q_num % num_vfs; + ret = hisi_qm_set_vft(qm, i, q_base, (u32)q_num); + if (ret) + return ret; + q_base += q_num; + } + + return 0; +} + +static int hpre_clear_vft_config(struct hpre *hpre) +{ + struct hisi_qm *qm = &hpre->qm; + u32 num_vfs = hpre->num_vfs; + int ret; + u32 i; + + for (i = 1; i <= num_vfs; i++) { + ret = hisi_qm_set_vft(qm, i, 0, 0); + if (ret) + return ret; + } + hpre->num_vfs = 0; + + return 0; +} + +static int hpre_sriov_enable(struct pci_dev *pdev, int max_vfs) +{ + struct hpre *hpre = pci_get_drvdata(pdev); + int pre_existing_vfs, num_vfs, ret; + + pre_existing_vfs = pci_num_vf(pdev); + if (pre_existing_vfs) { + pci_err(pdev, + "Can't enable VF. 
Please disable pre-enabled VFs!\n"); + return 0; + } + + num_vfs = min_t(int, max_vfs, HPRE_VF_NUM); + ret = hpre_vf_q_assign(hpre, num_vfs); + if (ret) { + pci_err(pdev, "Can't assign queues for VF!\n"); + return ret; + } + + hpre->num_vfs = num_vfs; + + ret = pci_enable_sriov(pdev, num_vfs); + if (ret) { + pci_err(pdev, "Can't enable VF!\n"); + hpre_clear_vft_config(hpre); + return ret; + } + + return num_vfs; +} + +static int hpre_sriov_disable(struct pci_dev *pdev) +{ + struct hpre *hpre = pci_get_drvdata(pdev); + + if (pci_vfs_assigned(pdev)) { + pci_err(pdev, "Failed to disable VFs while VFs are assigned!\n"); + return -EPERM; + } + + /* remove in hpre_pci_driver will be called to free VF resources */ + pci_disable_sriov(pdev); + + return hpre_clear_vft_config(hpre); +} + +static int hpre_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + if (num_vfs) + return hpre_sriov_enable(pdev, num_vfs); + else + return hpre_sriov_disable(pdev); +} + +static void hpre_remove(struct pci_dev *pdev) +{ + struct hpre *hpre = pci_get_drvdata(pdev); + struct hisi_qm *qm = &hpre->qm; + int ret; + + hpre_algs_unregister(); + hpre_remove_from_list(hpre); + if (qm->fun_type == QM_HW_PF && hpre->num_vfs != 0) { + ret = hpre_sriov_disable(pdev); + if (ret) { + pci_err(pdev, "Disable SRIOV fail!\n"); + return; + } + } + if (qm->fun_type == QM_HW_PF) { + hpre_cnt_regs_clear(qm); + qm->debug.curr_qm_qp_num = 0; + } + + hpre_debugfs_exit(hpre); + hisi_qm_stop(qm); + if (qm->fun_type == QM_HW_PF) + hpre_hw_error_disable(hpre); + hisi_qm_uninit(qm); +} + +static void hpre_log_hw_error(struct hpre *hpre, u32 err_sts) +{ + const struct hpre_hw_error *err = hpre_hw_errors; + struct device *dev = &hpre->qm.pdev->dev; + + while (err->msg) { + if (err->int_msk & err_sts) + dev_warn(dev, "%s [error status=0x%x] found\n", + err->msg, err->int_msk); + err++; + } +} + +static pci_ers_result_t hpre_hw_error_handle(struct hpre *hpre) +{ + u32 err_sts; + + /* read err sts */ + err_sts = readl(hpre->qm.io_base + HPRE_HAC_INT_STATUS); + if (err_sts) { + hpre_log_hw_error(hpre, err_sts); + + /* clear error interrupts */ + writel(err_sts, hpre->qm.io_base + HPRE_HAC_SOURCE_INT); + return PCI_ERS_RESULT_NEED_RESET; + } + + return PCI_ERS_RESULT_RECOVERED; +} + +static pci_ers_result_t hpre_process_hw_error(struct pci_dev *pdev) +{ + struct hpre *hpre = pci_get_drvdata(pdev); + pci_ers_result_t qm_ret, hpre_ret; + + /* log qm error */ + qm_ret = hisi_qm_hw_error_handle(&hpre->qm); + + /* log hpre error */ + hpre_ret = hpre_hw_error_handle(hpre); + + return (qm_ret == PCI_ERS_RESULT_NEED_RESET || + hpre_ret == PCI_ERS_RESULT_NEED_RESET) ? 
+ PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED; +} + +static pci_ers_result_t hpre_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + pci_info(pdev, "PCI error detected, state(=%d)!!\n", state); + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + return hpre_process_hw_error(pdev); +} + +static const struct pci_error_handlers hpre_err_handler = { + .error_detected = hpre_error_detected, +}; + +static struct pci_driver hpre_pci_driver = { + .name = hpre_name, + .id_table = hpre_dev_ids, + .probe = hpre_probe, + .remove = hpre_remove, + .sriov_configure = hpre_sriov_configure, + .err_handler = &hpre_err_handler, +}; + +static void hpre_register_debugfs(void) +{ + if (!debugfs_initialized()) + return; + + hpre_debugfs_root = debugfs_create_dir(hpre_name, NULL); + if (IS_ERR_OR_NULL(hpre_debugfs_root)) + hpre_debugfs_root = NULL; +} + +static void hpre_unregister_debugfs(void) +{ + debugfs_remove_recursive(hpre_debugfs_root); +} + +static int __init hpre_init(void) +{ + int ret; + + hpre_register_debugfs(); + + ret = pci_register_driver(&hpre_pci_driver); + if (ret) { + hpre_unregister_debugfs(); + pr_err("hpre: can't register hisi hpre driver.\n"); + } + + return ret; +} + +static void __exit hpre_exit(void) +{ + pci_unregister_driver(&hpre_pci_driver); + hpre_unregister_debugfs(); +} + +module_init(hpre_init); +module_exit(hpre_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>"); +MODULE_DESCRIPTION("Driver for HiSilicon HPRE accelerator"); diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index f975c393a603..b57da5ef8b5b 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -59,17 +59,17 @@ #define QM_CQ_PHASE_SHIFT 0 #define QM_CQ_FLAG_SHIFT 1 -#define QM_CQE_PHASE(cqe) ((cqe)->w7 & 0x1) +#define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1) #define QM_QC_CQE_SIZE 4 /* eqc shift */ #define QM_EQE_AEQE_SIZE (2UL << 12) #define QM_EQC_PHASE_SHIFT 16 -#define QM_EQE_PHASE(eqe) (((eqe)->dw0 >> 16) & 0x1) +#define QM_EQE_PHASE(eqe) ((le32_to_cpu((eqe)->dw0) >> 16) & 0x1) #define QM_EQE_CQN_MASK GENMASK(15, 0) -#define QM_AEQE_PHASE(aeqe) (((aeqe)->dw0 >> 16) & 0x1) +#define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1) #define QM_AEQE_TYPE_SHIFT 17 #define QM_DOORBELL_CMD_SQ 0 @@ -169,17 +169,17 @@ #define QM_MK_SQC_DW3_V2(sqe_sz) \ ((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT)) -#define INIT_QC_COMMON(qc, base, pasid) do { \ - (qc)->head = 0; \ - (qc)->tail = 0; \ - (qc)->base_l = lower_32_bits(base); \ - (qc)->base_h = upper_32_bits(base); \ - (qc)->dw3 = 0; \ - (qc)->w8 = 0; \ - (qc)->rsvd0 = 0; \ - (qc)->pasid = pasid; \ - (qc)->w11 = 0; \ - (qc)->rsvd1 = 0; \ +#define INIT_QC_COMMON(qc, base, pasid) do { \ + (qc)->head = 0; \ + (qc)->tail = 0; \ + (qc)->base_l = cpu_to_le32(lower_32_bits(base)); \ + (qc)->base_h = cpu_to_le32(upper_32_bits(base)); \ + (qc)->dw3 = 0; \ + (qc)->w8 = 0; \ + (qc)->rsvd0 = 0; \ + (qc)->pasid = cpu_to_le16(pasid); \ + (qc)->w11 = 0; \ + (qc)->rsvd1 = 0; \ } while (0) enum vft_type { @@ -331,12 +331,18 @@ static void qm_mb_write(struct hisi_qm *qm, const void *src) void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE; unsigned long tmp0 = 0, tmp1 = 0; + if (!IS_ENABLED(CONFIG_ARM64)) { + memcpy_toio(fun_base, src, 16); + wmb(); + return; + } + asm volatile("ldp %0, %1, %3\n" "stp %0, %1, %2\n" "dsb sy\n" : "=&r" (tmp0), "=&r" (tmp1), - "+Q" (*((char *)fun_base)) + "+Q" (*((char 
__iomem *)fun_base)) : "Q" (*((char *)src)) : "memory"); } @@ -350,12 +356,12 @@ static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n", queue, cmd, (unsigned long long)dma_addr); - mailbox.w0 = cmd | + mailbox.w0 = cpu_to_le16(cmd | (op ? 0x1 << QM_MB_OP_SHIFT : 0) | - (0x1 << QM_MB_BUSY_SHIFT); - mailbox.queue_num = queue; - mailbox.base_l = lower_32_bits(dma_addr); - mailbox.base_h = upper_32_bits(dma_addr); + (0x1 << QM_MB_BUSY_SHIFT)); + mailbox.queue_num = cpu_to_le16(queue); + mailbox.base_l = cpu_to_le32(lower_32_bits(dma_addr)); + mailbox.base_h = cpu_to_le32(upper_32_bits(dma_addr)); mailbox.rsvd = 0; mutex_lock(&qm->mailbox_lock); @@ -442,7 +448,7 @@ static u32 qm_get_irq_num_v2(struct hisi_qm *qm) static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe) { - u16 cqn = eqe->dw0 & QM_EQE_CQN_MASK; + u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; return qm->qp_array[cqn]; } @@ -464,7 +470,8 @@ static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm) if (qp->req_cb) { while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { dma_rmb(); - qp->req_cb(qp, qp->sqe + qm->sqe_size * cqe->sq_head); + qp->req_cb(qp, qp->sqe + qm->sqe_size * + le16_to_cpu(cqe->sq_head)); qm_cq_head_update(qp); cqe = qp->cqe + qp->qp_status.cq_head; qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, @@ -542,7 +549,7 @@ static irqreturn_t qm_aeq_irq(int irq, void *data) return IRQ_NONE; while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) { - type = aeqe->dw0 >> QM_AEQE_TYPE_SHIFT; + type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT; if (type < ARRAY_SIZE(qm_fifo_overflow)) dev_err(&qm->pdev->dev, "%s overflow\n", qm_fifo_overflow[type]); @@ -646,7 +653,7 @@ static void qm_init_qp_status(struct hisi_qp *qp) qp_status->sq_tail = 0; qp_status->cq_head = 0; - qp_status->cqc_phase = 1; + qp_status->cqc_phase = true; qp_status->flags = 0; } @@ -963,13 +970,11 @@ static const struct file_operations qm_regs_fops = { static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index) { - struct dentry *qm_d = qm->debug.qm_d, *tmp; + struct dentry *qm_d = qm->debug.qm_d; struct debugfs_file *file = qm->debug.files + index; - tmp = debugfs_create_file(qm_debug_file_name[index], 0600, qm_d, file, - &qm_debug_fops); - if (IS_ERR(tmp)) - return -ENOENT; + debugfs_create_file(qm_debug_file_name[index], 0600, qm_d, file, + &qm_debug_fops); file->index = index; mutex_init(&file->lock); @@ -981,9 +986,6 @@ static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index) static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, u32 msi) { - dev_info(&qm->pdev->dev, - "QM v%d does not support hw error handle\n", qm->ver); - writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); } @@ -1123,6 +1125,7 @@ struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) } set_bit(qp_id, qm->qp_bitmap); qm->qp_array[qp_id] = qp; + qm->qp_in_used++; write_unlock(&qm->qps_lock); @@ -1187,6 +1190,7 @@ void hisi_qm_release_qp(struct hisi_qp *qp) write_lock(&qm->qps_lock); qm->qp_array[qp->qp_id] = NULL; clear_bit(qp->qp_id, qm->qp_bitmap); + qm->qp_in_used--; write_unlock(&qm->qps_lock); kfree(qp); @@ -1218,14 +1222,14 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid) INIT_QC_COMMON(sqc, qp->sqe_dma, pasid); if (ver == QM_HW_V1) { - sqc->dw3 = QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size); - sqc->w8 = QM_Q_DEPTH - 1; + sqc->dw3 = 
cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size)); + sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1); } else if (ver == QM_HW_V2) { - sqc->dw3 = QM_MK_SQC_DW3_V2(qm->sqe_size); + sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size)); sqc->w8 = 0; /* rand_qc */ } - sqc->cq_num = qp_id; - sqc->w13 = QM_MK_SQC_W13(0, 1, qp->alg_type); + sqc->cq_num = cpu_to_le16(qp_id); + sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type)); ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0); dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE); @@ -1245,13 +1249,13 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid) INIT_QC_COMMON(cqc, qp->cqe_dma, pasid); if (ver == QM_HW_V1) { - cqc->dw3 = QM_MK_CQC_DW3_V1(0, 0, 0, 4); - cqc->w8 = QM_Q_DEPTH - 1; + cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, 4)); + cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1); } else if (ver == QM_HW_V2) { - cqc->dw3 = QM_MK_CQC_DW3_V2(4); + cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(4)); cqc->w8 = 0; } - cqc->dw6 = 1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT; + cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT); ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0); dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE); @@ -1392,6 +1396,24 @@ static void hisi_qm_cache_wb(struct hisi_qm *qm) } /** + * hisi_qm_get_free_qp_num() - Get free number of qp in qm. + * @qm: The qm which want to get free qp. + * + * This function return free number of qp in qm. + */ +int hisi_qm_get_free_qp_num(struct hisi_qm *qm) +{ + int ret; + + read_lock(&qm->qps_lock); + ret = qm->qp_num - qm->qp_in_used; + read_unlock(&qm->qps_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(hisi_qm_get_free_qp_num); + +/** * hisi_qm_init() - Initialize configures about qm. * @qm: The qm needing init. 
* @@ -1454,6 +1476,7 @@ int hisi_qm_init(struct hisi_qm *qm) if (ret) goto err_free_irq_vectors; + qm->qp_in_used = 0; mutex_init(&qm->mailbox_lock); rwlock_init(&qm->qps_lock); @@ -1560,8 +1583,8 @@ static void qm_init_eq_aeq_status(struct hisi_qm *qm) status->eq_head = 0; status->aeq_head = 0; - status->eqc_phase = 1; - status->aeqc_phase = 1; + status->eqc_phase = true; + status->aeqc_phase = true; } static int qm_eq_ctx_cfg(struct hisi_qm *qm) @@ -1585,11 +1608,11 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm) return -ENOMEM; } - eqc->base_l = lower_32_bits(qm->eqe_dma); - eqc->base_h = upper_32_bits(qm->eqe_dma); + eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma)); + eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma)); if (qm->ver == QM_HW_V1) - eqc->dw3 = QM_EQE_AEQE_SIZE; - eqc->dw6 = (QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT); + eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE); + eqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT)); ret = qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0); dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE); kfree(eqc); @@ -1606,9 +1629,9 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm) return -ENOMEM; } - aeqc->base_l = lower_32_bits(qm->aeqe_dma); - aeqc->base_h = upper_32_bits(qm->aeqe_dma); - aeqc->dw6 = (QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT); + aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma)); + aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma)); + aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT)); ret = qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0); dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE); @@ -1780,12 +1803,10 @@ EXPORT_SYMBOL_GPL(hisi_qm_stop); */ int hisi_qm_debug_init(struct hisi_qm *qm) { - struct dentry *qm_d, *qm_regs; + struct dentry *qm_d; int i, ret; qm_d = debugfs_create_dir("qm", qm->debug.debug_root); - if (IS_ERR(qm_d)) - return -ENOENT; qm->debug.qm_d = qm_d; /* only show this in PF */ @@ -1796,12 +1817,7 @@ int hisi_qm_debug_init(struct hisi_qm *qm) goto failed_to_create; } - qm_regs = debugfs_create_file("qm_regs", 0444, qm->debug.qm_d, qm, - &qm_regs_fops); - if (IS_ERR(qm_regs)) { - ret = -ENOENT; - goto failed_to_create; - } + debugfs_create_file("qm_regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops); return 0; @@ -1862,8 +1878,7 @@ void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, u32 msi) { if (!qm->ops->hw_error_init) { - dev_err(&qm->pdev->dev, "QM version %d doesn't support hw error handling!\n", - qm->ver); + dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n"); return; } @@ -1877,11 +1892,10 @@ EXPORT_SYMBOL_GPL(hisi_qm_hw_error_init); * * Accelerators use this function to handle qm non-fatal hardware errors. 
*/ -int hisi_qm_hw_error_handle(struct hisi_qm *qm) +pci_ers_result_t hisi_qm_hw_error_handle(struct hisi_qm *qm) { if (!qm->ops->hw_error_handle) { - dev_err(&qm->pdev->dev, "QM version %d doesn't support hw error report!\n", - qm->ver); + dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n"); return PCI_ERS_RESULT_NONE; } diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index 70e672ae86bf..078b8f1f1b77 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -75,6 +75,8 @@ #define QM_Q_DEPTH 1024 +#define HISI_ACC_SGL_SGE_NR_MAX 255 + enum qp_state { QP_STOP, }; @@ -132,6 +134,7 @@ struct hisi_qm { u32 sqe_size; u32 qp_base; u32 qp_num; + u32 qp_in_used; u32 ctrl_qp_num; struct qm_dma qdma; @@ -204,12 +207,24 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg); int hisi_qm_stop_qp(struct hisi_qp *qp); void hisi_qm_release_qp(struct hisi_qp *qp); int hisi_qp_send(struct hisi_qp *qp, const void *msg); +int hisi_qm_get_free_qp_num(struct hisi_qm *qm); int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number); int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, u32 number); int hisi_qm_debug_init(struct hisi_qm *qm); void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, u32 msi); -int hisi_qm_hw_error_handle(struct hisi_qm *qm); +pci_ers_result_t hisi_qm_hw_error_handle(struct hisi_qm *qm); enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev); void hisi_qm_debug_regs_clear(struct hisi_qm *qm); + +struct hisi_acc_sgl_pool; +struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, + struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool, + u32 index, dma_addr_t *hw_sgl_dma); +void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl, + struct hisi_acc_hw_sgl *hw_sgl); +struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev, + u32 count, u32 sge_nr); +void hisi_acc_free_sgl_pool(struct device *dev, + struct hisi_acc_sgl_pool *pool); #endif diff --git a/drivers/crypto/hisilicon/sec2/Makefile b/drivers/crypto/hisilicon/sec2/Makefile new file mode 100644 index 000000000000..b4f6cf14be3a --- /dev/null +++ b/drivers/crypto/hisilicon/sec2/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_CRYPTO_DEV_HISI_SEC2) += hisi_sec2.o +hisi_sec2-objs = sec_main.o sec_crypto.o diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h new file mode 100644 index 000000000000..26754d0570ba --- /dev/null +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019 HiSilicon Limited. 
*/ + +#ifndef __HISI_SEC_V2_H +#define __HISI_SEC_V2_H + +#include <linux/list.h> + +#include "../qm.h" +#include "sec_crypto.h" + +/* Cipher resource per hardware SEC queue */ +struct sec_cipher_res { + u8 *c_ivin; + dma_addr_t c_ivin_dma; +}; + +/* Cipher request of SEC private */ +struct sec_cipher_req { + struct hisi_acc_hw_sgl *c_in; + dma_addr_t c_in_dma; + struct hisi_acc_hw_sgl *c_out; + dma_addr_t c_out_dma; + u8 *c_ivin; + dma_addr_t c_ivin_dma; + struct skcipher_request *sk_req; + u32 c_len; + bool encrypt; +}; + +/* SEC request of Crypto */ +struct sec_req { + struct sec_sqe sec_sqe; + struct sec_ctx *ctx; + struct sec_qp_ctx *qp_ctx; + + /* Cipher supported only at present */ + struct sec_cipher_req c_req; + int err_type; + int req_id; + + /* Status of the SEC request */ + int fake_busy; +}; + +/** + * struct sec_req_op - Operations for SEC request + * @get_res: Get resources for TFM on the SEC device + * @resource_alloc: Allocate resources for queue context on the SEC device + * @resource_free: Free resources for queue context on the SEC device + * @buf_map: DMA map the SGL buffers of the request + * @buf_unmap: DMA unmap the SGL buffers of the request + * @bd_fill: Fill the SEC queue BD + * @bd_send: Send the SEC BD into the hardware queue + * @callback: Call back for the request + * @process: Main processing logic of Skcipher + */ +struct sec_req_op { + int (*get_res)(struct sec_ctx *ctx, struct sec_req *req); + int (*resource_alloc)(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx); + void (*resource_free)(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx); + int (*buf_map)(struct sec_ctx *ctx, struct sec_req *req); + void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req); + void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req); + int (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req); + int (*bd_send)(struct sec_ctx *ctx, struct sec_req *req); + void (*callback)(struct sec_ctx *ctx, struct sec_req *req); + int (*process)(struct sec_ctx *ctx, struct sec_req *req); +}; + +/* SEC cipher context which cipher's relatives */ +struct sec_cipher_ctx { + u8 *c_key; + dma_addr_t c_key_dma; + sector_t iv_offset; + u32 c_gran_size; + u32 ivsize; + u8 c_mode; + u8 c_alg; + u8 c_key_len; +}; + +/* SEC queue context which defines queue's relatives */ +struct sec_qp_ctx { + struct hisi_qp *qp; + struct sec_req **req_list; + struct idr req_idr; + void *alg_meta_data; + struct sec_ctx *ctx; + struct mutex req_lock; + struct hisi_acc_sgl_pool *c_in_pool; + struct hisi_acc_sgl_pool *c_out_pool; + atomic_t pending_reqs; +}; + +/* SEC Crypto TFM context which defines queue and cipher .etc relatives */ +struct sec_ctx { + struct sec_qp_ctx *qp_ctx; + struct sec_dev *sec; + const struct sec_req_op *req_op; + + /* Half queues for encipher, and half for decipher */ + u32 hlf_q_num; + + /* Threshold for fake busy, trigger to return -EBUSY to user */ + u32 fake_req_limit; + + /* Currrent cyclic index to select a queue for encipher */ + atomic_t enc_qcyclic; + + /* Currrent cyclic index to select a queue for decipher */ + atomic_t dec_qcyclic; + struct sec_cipher_ctx c_ctx; +}; + +enum sec_endian { + SEC_LE = 0, + SEC_32BE, + SEC_64BE +}; + +enum sec_debug_file_index { + SEC_CURRENT_QM, + SEC_CLEAR_ENABLE, + SEC_DEBUG_FILE_NUM, +}; + +struct sec_debug_file { + enum sec_debug_file_index index; + spinlock_t lock; + struct hisi_qm *qm; +}; + +struct sec_dfx { + u64 send_cnt; + u64 recv_cnt; +}; + +struct sec_debug { + struct sec_dfx dfx; + struct sec_debug_file files[SEC_DEBUG_FILE_NUM]; +}; + 
+struct sec_dev { + struct hisi_qm qm; + struct list_head list; + struct sec_debug debug; + u32 ctx_q_num; + u32 num_vfs; + unsigned long status; +}; + +struct sec_dev *sec_find_device(int node); +int sec_register_to_crypto(void); +void sec_unregister_from_crypto(void); +#endif diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c new file mode 100644 index 000000000000..dc1eb97d57f7 --- /dev/null +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -0,0 +1,889 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 HiSilicon Limited. */ + +#include <crypto/aes.h> +#include <crypto/algapi.h> +#include <crypto/des.h> +#include <crypto/skcipher.h> +#include <crypto/xts.h> +#include <linux/crypto.h> +#include <linux/dma-mapping.h> +#include <linux/idr.h> + +#include "sec.h" +#include "sec_crypto.h" + +#define SEC_PRIORITY 4001 +#define SEC_XTS_MIN_KEY_SIZE (2 * AES_MIN_KEY_SIZE) +#define SEC_XTS_MAX_KEY_SIZE (2 * AES_MAX_KEY_SIZE) +#define SEC_DES3_2KEY_SIZE (2 * DES_KEY_SIZE) +#define SEC_DES3_3KEY_SIZE (3 * DES_KEY_SIZE) + +/* SEC sqe(bd) bit operational relative MACRO */ +#define SEC_DE_OFFSET 1 +#define SEC_CIPHER_OFFSET 4 +#define SEC_SCENE_OFFSET 3 +#define SEC_DST_SGL_OFFSET 2 +#define SEC_SRC_SGL_OFFSET 7 +#define SEC_CKEY_OFFSET 9 +#define SEC_CMODE_OFFSET 12 +#define SEC_FLAG_OFFSET 7 +#define SEC_FLAG_MASK 0x0780 +#define SEC_TYPE_MASK 0x0F +#define SEC_DONE_MASK 0x0001 + +#define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH) +#define SEC_SGL_SGE_NR 128 +#define SEC_CTX_DEV(ctx) (&(ctx)->sec->qm.pdev->dev) + +static DEFINE_MUTEX(sec_algs_lock); +static unsigned int sec_active_devs; + +/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */ +static inline int sec_get_queue_id(struct sec_ctx *ctx, struct sec_req *req) +{ + if (req->c_req.encrypt) + return (u32)atomic_inc_return(&ctx->enc_qcyclic) % + ctx->hlf_q_num; + + return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num + + ctx->hlf_q_num; +} + +static inline void sec_put_queue_id(struct sec_ctx *ctx, struct sec_req *req) +{ + if (req->c_req.encrypt) + atomic_dec(&ctx->enc_qcyclic); + else + atomic_dec(&ctx->dec_qcyclic); +} + +static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx) +{ + int req_id; + + mutex_lock(&qp_ctx->req_lock); + + req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, + 0, QM_Q_DEPTH, GFP_ATOMIC); + mutex_unlock(&qp_ctx->req_lock); + if (req_id < 0) { + dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n"); + return req_id; + } + + req->qp_ctx = qp_ctx; + qp_ctx->req_list[req_id] = req; + return req_id; +} + +static void sec_free_req_id(struct sec_req *req) +{ + struct sec_qp_ctx *qp_ctx = req->qp_ctx; + int req_id = req->req_id; + + if (req_id < 0 || req_id >= QM_Q_DEPTH) { + dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n"); + return; + } + + qp_ctx->req_list[req_id] = NULL; + req->qp_ctx = NULL; + + mutex_lock(&qp_ctx->req_lock); + idr_remove(&qp_ctx->req_idr, req_id); + mutex_unlock(&qp_ctx->req_lock); +} + +static void sec_req_cb(struct hisi_qp *qp, void *resp) +{ + struct sec_qp_ctx *qp_ctx = qp->qp_ctx; + struct sec_sqe *bd = resp; + u16 done, flag; + u8 type; + struct sec_req *req; + + type = bd->type_cipher_auth & SEC_TYPE_MASK; + if (type == SEC_BD_TYPE2) { + req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)]; + req->err_type = bd->type2.error_type; + + done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK; + flag = (le16_to_cpu(bd->type2.done_flag) & + SEC_FLAG_MASK) >> 
SEC_FLAG_OFFSET; + if (req->err_type || done != 0x1 || flag != 0x2) + dev_err(SEC_CTX_DEV(req->ctx), + "err_type[%d],done[%d],flag[%d]\n", + req->err_type, done, flag); + } else { + pr_err("err bd type [%d]\n", type); + return; + } + + __sync_add_and_fetch(&req->ctx->sec->debug.dfx.recv_cnt, 1); + + req->ctx->req_op->buf_unmap(req->ctx, req); + + req->ctx->req_op->callback(req->ctx, req); +} + +static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) +{ + struct sec_qp_ctx *qp_ctx = req->qp_ctx; + int ret; + + mutex_lock(&qp_ctx->req_lock); + ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe); + mutex_unlock(&qp_ctx->req_lock); + __sync_add_and_fetch(&ctx->sec->debug.dfx.send_cnt, 1); + + if (ret == -EBUSY) + return -ENOBUFS; + + if (!ret) { + if (req->fake_busy) + ret = -EBUSY; + else + ret = -EINPROGRESS; + } + + return ret; +} + +static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, + int qp_ctx_id, int alg_type) +{ + struct device *dev = SEC_CTX_DEV(ctx); + struct sec_qp_ctx *qp_ctx; + struct hisi_qp *qp; + int ret = -ENOMEM; + + qp = hisi_qm_create_qp(qm, alg_type); + if (IS_ERR(qp)) + return PTR_ERR(qp); + + qp_ctx = &ctx->qp_ctx[qp_ctx_id]; + qp->req_type = 0; + qp->qp_ctx = qp_ctx; + qp->req_cb = sec_req_cb; + qp_ctx->qp = qp; + qp_ctx->ctx = ctx; + + mutex_init(&qp_ctx->req_lock); + atomic_set(&qp_ctx->pending_reqs, 0); + idr_init(&qp_ctx->req_idr); + + qp_ctx->req_list = kcalloc(QM_Q_DEPTH, sizeof(void *), GFP_ATOMIC); + if (!qp_ctx->req_list) + goto err_destroy_idr; + + qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH, + SEC_SGL_SGE_NR); + if (!qp_ctx->c_in_pool) { + dev_err(dev, "fail to create sgl pool for input!\n"); + goto err_free_req_list; + } + + qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH, + SEC_SGL_SGE_NR); + if (!qp_ctx->c_out_pool) { + dev_err(dev, "fail to create sgl pool for output!\n"); + goto err_free_c_in_pool; + } + + ret = ctx->req_op->resource_alloc(ctx, qp_ctx); + if (ret) + goto err_free_c_out_pool; + + ret = hisi_qm_start_qp(qp, 0); + if (ret < 0) + goto err_queue_free; + + return 0; + +err_queue_free: + ctx->req_op->resource_free(ctx, qp_ctx); +err_free_c_out_pool: + hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool); +err_free_c_in_pool: + hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool); +err_free_req_list: + kfree(qp_ctx->req_list); +err_destroy_idr: + idr_destroy(&qp_ctx->req_idr); + hisi_qm_release_qp(qp); + + return ret; +} + +static void sec_release_qp_ctx(struct sec_ctx *ctx, + struct sec_qp_ctx *qp_ctx) +{ + struct device *dev = SEC_CTX_DEV(ctx); + + hisi_qm_stop_qp(qp_ctx->qp); + ctx->req_op->resource_free(ctx, qp_ctx); + + hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool); + hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool); + + idr_destroy(&qp_ctx->req_idr); + kfree(qp_ctx->req_list); + hisi_qm_release_qp(qp_ctx->qp); +} + +static int sec_skcipher_init(struct crypto_skcipher *tfm) +{ + struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); + struct sec_cipher_ctx *c_ctx; + struct sec_dev *sec; + struct device *dev; + struct hisi_qm *qm; + int i, ret; + + crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req)); + + sec = sec_find_device(cpu_to_node(smp_processor_id())); + if (!sec) { + pr_err("find no Hisilicon SEC device!\n"); + return -ENODEV; + } + ctx->sec = sec; + qm = &sec->qm; + dev = &qm->pdev->dev; + ctx->hlf_q_num = sec->ctx_q_num >> 0x1; + + /* Half of queue depth is taken as fake requests limit in the queue. 
*/ + ctx->fake_req_limit = QM_Q_DEPTH >> 0x1; + ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx), + GFP_KERNEL); + if (!ctx->qp_ctx) + return -ENOMEM; + + for (i = 0; i < sec->ctx_q_num; i++) { + ret = sec_create_qp_ctx(qm, ctx, i, 0); + if (ret) + goto err_sec_release_qp_ctx; + } + + c_ctx = &ctx->c_ctx; + c_ctx->ivsize = crypto_skcipher_ivsize(tfm); + if (c_ctx->ivsize > SEC_IV_SIZE) { + dev_err(dev, "get error iv size!\n"); + ret = -EINVAL; + goto err_sec_release_qp_ctx; + } + c_ctx->c_key = dma_alloc_coherent(dev, SEC_MAX_KEY_SIZE, + &c_ctx->c_key_dma, GFP_KERNEL); + if (!c_ctx->c_key) { + ret = -ENOMEM; + goto err_sec_release_qp_ctx; + } + + return 0; + +err_sec_release_qp_ctx: + for (i = i - 1; i >= 0; i--) + sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]); + + kfree(ctx->qp_ctx); + return ret; +} + +static void sec_skcipher_exit(struct crypto_skcipher *tfm) +{ + struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); + struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + int i = 0; + + if (c_ctx->c_key) { + dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, + c_ctx->c_key, c_ctx->c_key_dma); + c_ctx->c_key = NULL; + } + + for (i = 0; i < ctx->sec->ctx_q_num; i++) + sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]); + + kfree(ctx->qp_ctx); +} + +static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx, + const u32 keylen, + const enum sec_cmode c_mode) +{ + switch (keylen) { + case SEC_DES3_2KEY_SIZE: + c_ctx->c_key_len = SEC_CKEY_3DES_2KEY; + break; + case SEC_DES3_3KEY_SIZE: + c_ctx->c_key_len = SEC_CKEY_3DES_3KEY; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx, + const u32 keylen, + const enum sec_cmode c_mode) +{ + if (c_mode == SEC_CMODE_XTS) { + switch (keylen) { + case SEC_XTS_MIN_KEY_SIZE: + c_ctx->c_key_len = SEC_CKEY_128BIT; + break; + case SEC_XTS_MAX_KEY_SIZE: + c_ctx->c_key_len = SEC_CKEY_256BIT; + break; + default: + pr_err("hisi_sec2: xts mode key error!\n"); + return -EINVAL; + } + } else { + switch (keylen) { + case AES_KEYSIZE_128: + c_ctx->c_key_len = SEC_CKEY_128BIT; + break; + case AES_KEYSIZE_192: + c_ctx->c_key_len = SEC_CKEY_192BIT; + break; + case AES_KEYSIZE_256: + c_ctx->c_key_len = SEC_CKEY_256BIT; + break; + default: + pr_err("hisi_sec2: aes key error!\n"); + return -EINVAL; + } + } + + return 0; +} + +static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, + const u32 keylen, const enum sec_calg c_alg, + const enum sec_cmode c_mode) +{ + struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); + struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + int ret; + + if (c_mode == SEC_CMODE_XTS) { + ret = xts_verify_key(tfm, key, keylen); + if (ret) { + dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n"); + return ret; + } + } + + c_ctx->c_alg = c_alg; + c_ctx->c_mode = c_mode; + + switch (c_alg) { + case SEC_CALG_3DES: + ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode); + break; + case SEC_CALG_AES: + case SEC_CALG_SM4: + ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode); + break; + default: + return -EINVAL; + } + + if (ret) { + dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n"); + return ret; + } + + memcpy(c_ctx->c_key, key, keylen); + + return 0; +} + +#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode) \ +static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\ + u32 keylen) \ +{ \ + return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode); \ +} + +GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB) +GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, 
SEC_CMODE_CBC) +GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS) + +GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB) +GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC) + +GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS) +GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC) + +static int sec_skcipher_get_res(struct sec_ctx *ctx, + struct sec_req *req) +{ + struct sec_qp_ctx *qp_ctx = req->qp_ctx; + struct sec_cipher_res *c_res = qp_ctx->alg_meta_data; + struct sec_cipher_req *c_req = &req->c_req; + int req_id = req->req_id; + + c_req->c_ivin = c_res[req_id].c_ivin; + c_req->c_ivin_dma = c_res[req_id].c_ivin_dma; + + return 0; +} + +static int sec_skcipher_resource_alloc(struct sec_ctx *ctx, + struct sec_qp_ctx *qp_ctx) +{ + struct device *dev = SEC_CTX_DEV(ctx); + struct sec_cipher_res *res; + int i; + + res = kcalloc(QM_Q_DEPTH, sizeof(struct sec_cipher_res), GFP_KERNEL); + if (!res) + return -ENOMEM; + + res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ, + &res->c_ivin_dma, GFP_KERNEL); + if (!res->c_ivin) { + kfree(res); + return -ENOMEM; + } + + for (i = 1; i < QM_Q_DEPTH; i++) { + res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE; + res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE; + } + qp_ctx->alg_meta_data = res; + + return 0; +} + +static void sec_skcipher_resource_free(struct sec_ctx *ctx, + struct sec_qp_ctx *qp_ctx) +{ + struct sec_cipher_res *res = qp_ctx->alg_meta_data; + struct device *dev = SEC_CTX_DEV(ctx); + + if (!res) + return; + + dma_free_coherent(dev, SEC_TOTAL_IV_SZ, res->c_ivin, res->c_ivin_dma); + kfree(res); +} + +static int sec_skcipher_map(struct device *dev, struct sec_req *req, + struct scatterlist *src, struct scatterlist *dst) +{ + struct sec_cipher_req *c_req = &req->c_req; + struct sec_qp_ctx *qp_ctx = req->qp_ctx; + + c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src, + qp_ctx->c_in_pool, + req->req_id, + &c_req->c_in_dma); + + if (IS_ERR(c_req->c_in)) { + dev_err(dev, "fail to dma map input sgl buffers!\n"); + return PTR_ERR(c_req->c_in); + } + + if (dst == src) { + c_req->c_out = c_req->c_in; + c_req->c_out_dma = c_req->c_in_dma; + } else { + c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst, + qp_ctx->c_out_pool, + req->req_id, + &c_req->c_out_dma); + + if (IS_ERR(c_req->c_out)) { + dev_err(dev, "fail to dma map output sgl buffers!\n"); + hisi_acc_sg_buf_unmap(dev, src, c_req->c_in); + return PTR_ERR(c_req->c_out); + } + } + + return 0; +} + +static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req) +{ + struct sec_cipher_req *c_req = &req->c_req; + + return sec_skcipher_map(SEC_CTX_DEV(ctx), req, + c_req->sk_req->src, c_req->sk_req->dst); +} + +static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req) +{ + struct device *dev = SEC_CTX_DEV(ctx); + struct sec_cipher_req *c_req = &req->c_req; + struct skcipher_request *sk_req = c_req->sk_req; + + if (sk_req->dst != sk_req->src) + hisi_acc_sg_buf_unmap(dev, sk_req->src, c_req->c_in); + + hisi_acc_sg_buf_unmap(dev, sk_req->dst, c_req->c_out); +} + +static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req) +{ + int ret; + + ret = ctx->req_op->buf_map(ctx, req); + if (ret) + return ret; + + ctx->req_op->do_transfer(ctx, req); + + ret = ctx->req_op->bd_fill(ctx, req); + if (ret) + goto unmap_req_buf; + + return ret; + +unmap_req_buf: + ctx->req_op->buf_unmap(ctx, req); + + return ret; +} + +static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req) +{ + ctx->req_op->buf_unmap(ctx, req); 
+} + +static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req) +{ + struct skcipher_request *sk_req = req->c_req.sk_req; + struct sec_cipher_req *c_req = &req->c_req; + + c_req->c_len = sk_req->cryptlen; + memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize); +} + +static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req) +{ + struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + struct sec_cipher_req *c_req = &req->c_req; + struct sec_sqe *sec_sqe = &req->sec_sqe; + u8 de = 0; + u8 scene, sa_type, da_type; + u8 bd_type, cipher; + + memset(sec_sqe, 0, sizeof(struct sec_sqe)); + + sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma); + sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma); + sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma); + sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma); + + sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) << + SEC_CMODE_OFFSET); + sec_sqe->type2.c_alg = c_ctx->c_alg; + sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) << + SEC_CKEY_OFFSET); + + bd_type = SEC_BD_TYPE2; + if (c_req->encrypt) + cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET; + else + cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET; + sec_sqe->type_cipher_auth = bd_type | cipher; + + sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET; + scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET; + if (c_req->c_in_dma != c_req->c_out_dma) + de = 0x1 << SEC_DE_OFFSET; + + sec_sqe->sds_sa_type = (de | scene | sa_type); + + /* Just set DST address type */ + da_type = SEC_SGL << SEC_DST_SGL_OFFSET; + sec_sqe->sdm_addr_type |= da_type; + + sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len); + sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id); + + return 0; +} + +static void sec_update_iv(struct sec_req *req) +{ + struct skcipher_request *sk_req = req->c_req.sk_req; + u32 iv_size = req->ctx->c_ctx.ivsize; + struct scatterlist *sgl; + size_t sz; + + if (req->c_req.encrypt) + sgl = sk_req->dst; + else + sgl = sk_req->src; + + sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), sk_req->iv, + iv_size, sk_req->cryptlen - iv_size); + if (sz != iv_size) + dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n"); +} + +static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req) +{ + struct skcipher_request *sk_req = req->c_req.sk_req; + struct sec_qp_ctx *qp_ctx = req->qp_ctx; + + atomic_dec(&qp_ctx->pending_reqs); + sec_free_req_id(req); + + /* IV output at encrypto of CBC mode */ + if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt) + sec_update_iv(req); + + if (__sync_bool_compare_and_swap(&req->fake_busy, 1, 0)) + sk_req->base.complete(&sk_req->base, -EINPROGRESS); + + sk_req->base.complete(&sk_req->base, req->err_type); +} + +static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req) +{ + struct sec_qp_ctx *qp_ctx = req->qp_ctx; + + atomic_dec(&qp_ctx->pending_reqs); + sec_free_req_id(req); + sec_put_queue_id(ctx, req); +} + +static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req) +{ + struct sec_qp_ctx *qp_ctx; + int issue_id, ret; + + /* To load balance */ + issue_id = sec_get_queue_id(ctx, req); + qp_ctx = &ctx->qp_ctx[issue_id]; + + req->req_id = sec_alloc_req_id(req, qp_ctx); + if (req->req_id < 0) { + sec_put_queue_id(ctx, req); + return req->req_id; + } + + if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs)) + req->fake_busy = 1; + else + req->fake_busy = 0; + + ret = ctx->req_op->get_res(ctx, req); + if (ret) { + atomic_dec(&qp_ctx->pending_reqs); + 
sec_request_uninit(ctx, req); + dev_err(SEC_CTX_DEV(ctx), "get resources failed!\n"); + } + + return ret; +} + +static int sec_process(struct sec_ctx *ctx, struct sec_req *req) +{ + int ret; + + ret = sec_request_init(ctx, req); + if (ret) + return ret; + + ret = sec_request_transfer(ctx, req); + if (ret) + goto err_uninit_req; + + /* Output IV as decrypto */ + if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) + sec_update_iv(req); + + ret = ctx->req_op->bd_send(ctx, req); + if (ret != -EBUSY && ret != -EINPROGRESS) { + dev_err(SEC_CTX_DEV(ctx), "send sec request failed!\n"); + goto err_send_req; + } + + return ret; + +err_send_req: + /* As failing, restore the IV from user */ + if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) + memcpy(req->c_req.sk_req->iv, req->c_req.c_ivin, + ctx->c_ctx.ivsize); + + sec_request_untransfer(ctx, req); +err_uninit_req: + sec_request_uninit(ctx, req); + + return ret; +} + +static struct sec_req_op sec_req_ops_tbl = { + .get_res = sec_skcipher_get_res, + .resource_alloc = sec_skcipher_resource_alloc, + .resource_free = sec_skcipher_resource_free, + .buf_map = sec_skcipher_sgl_map, + .buf_unmap = sec_skcipher_sgl_unmap, + .do_transfer = sec_skcipher_copy_iv, + .bd_fill = sec_skcipher_bd_fill, + .bd_send = sec_bd_send, + .callback = sec_skcipher_callback, + .process = sec_process, +}; + +static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm) +{ + struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); + + ctx->req_op = &sec_req_ops_tbl; + + return sec_skcipher_init(tfm); +} + +static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm) +{ + sec_skcipher_exit(tfm); +} + +static int sec_skcipher_param_check(struct sec_ctx *ctx, + struct skcipher_request *sk_req) +{ + u8 c_alg = ctx->c_ctx.c_alg; + struct device *dev = SEC_CTX_DEV(ctx); + + if (!sk_req->src || !sk_req->dst) { + dev_err(dev, "skcipher input param error!\n"); + return -EINVAL; + } + + if (c_alg == SEC_CALG_3DES) { + if (sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1)) { + dev_err(dev, "skcipher 3des input length error!\n"); + return -EINVAL; + } + return 0; + } else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) { + if (sk_req->cryptlen & (AES_BLOCK_SIZE - 1)) { + dev_err(dev, "skcipher aes input length error!\n"); + return -EINVAL; + } + return 0; + } + + dev_err(dev, "skcipher algorithm error!\n"); + return -EINVAL; +} + +static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req); + struct sec_req *req = skcipher_request_ctx(sk_req); + struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); + int ret; + + if (!sk_req->cryptlen) + return 0; + + ret = sec_skcipher_param_check(ctx, sk_req); + if (ret) + return ret; + + req->c_req.sk_req = sk_req; + req->c_req.encrypt = encrypt; + req->ctx = ctx; + + return ctx->req_op->process(ctx, req); +} + +static int sec_skcipher_encrypt(struct skcipher_request *sk_req) +{ + return sec_skcipher_crypto(sk_req, true); +} + +static int sec_skcipher_decrypt(struct skcipher_request *sk_req) +{ + return sec_skcipher_crypto(sk_req, false); +} + +#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \ + sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\ +{\ + .base = {\ + .cra_name = sec_cra_name,\ + .cra_driver_name = "hisi_sec_"sec_cra_name,\ + .cra_priority = SEC_PRIORITY,\ + .cra_flags = CRYPTO_ALG_ASYNC,\ + .cra_blocksize = blk_size,\ + .cra_ctxsize = sizeof(struct sec_ctx),\ + .cra_module = THIS_MODULE,\ + },\ + .init = ctx_init,\ 
+ .exit = ctx_exit,\ + .setkey = sec_set_key,\ + .decrypt = sec_skcipher_decrypt,\ + .encrypt = sec_skcipher_encrypt,\ + .min_keysize = sec_min_key_size,\ + .max_keysize = sec_max_key_size,\ + .ivsize = iv_size,\ +}, + +#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \ + max_key_size, blk_size, iv_size) \ + SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \ + sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size) + +static struct skcipher_alg sec_algs[] = { + SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb, + AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, + AES_BLOCK_SIZE, 0) + + SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc, + AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, + AES_BLOCK_SIZE, AES_BLOCK_SIZE) + + SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts, + SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE, + AES_BLOCK_SIZE, AES_BLOCK_SIZE) + + SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, + SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE, + DES3_EDE_BLOCK_SIZE, 0) + + SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc, + SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE, + DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE) + + SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts, + SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE, + AES_BLOCK_SIZE, AES_BLOCK_SIZE) + + SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc, + AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, + AES_BLOCK_SIZE, AES_BLOCK_SIZE) +}; + +int sec_register_to_crypto(void) +{ + int ret = 0; + + /* To avoid repeat register */ + mutex_lock(&sec_algs_lock); + if (++sec_active_devs == 1) + ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs)); + mutex_unlock(&sec_algs_lock); + + return ret; +} + +void sec_unregister_from_crypto(void) +{ + mutex_lock(&sec_algs_lock); + if (--sec_active_devs == 0) + crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs)); + mutex_unlock(&sec_algs_lock); +} diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h new file mode 100644 index 000000000000..097dce828340 --- /dev/null +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h @@ -0,0 +1,198 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019 HiSilicon Limited. 
*/ + +#ifndef __HISI_SEC_V2_CRYPTO_H +#define __HISI_SEC_V2_CRYPTO_H + +#define SEC_IV_SIZE 24 +#define SEC_MAX_KEY_SIZE 64 +#define SEC_COMM_SCENE 0 + +enum sec_calg { + SEC_CALG_3DES = 0x1, + SEC_CALG_AES = 0x2, + SEC_CALG_SM4 = 0x3, +}; + +enum sec_cmode { + SEC_CMODE_ECB = 0x0, + SEC_CMODE_CBC = 0x1, + SEC_CMODE_CTR = 0x4, + SEC_CMODE_XTS = 0x7, +}; + +enum sec_ckey_type { + SEC_CKEY_128BIT = 0x0, + SEC_CKEY_192BIT = 0x1, + SEC_CKEY_256BIT = 0x2, + SEC_CKEY_3DES_3KEY = 0x1, + SEC_CKEY_3DES_2KEY = 0x3, +}; + +enum sec_bd_type { + SEC_BD_TYPE1 = 0x1, + SEC_BD_TYPE2 = 0x2, +}; + +enum sec_cipher_dir { + SEC_CIPHER_ENC = 0x1, + SEC_CIPHER_DEC = 0x2, +}; + +enum sec_addr_type { + SEC_PBUF = 0x0, + SEC_SGL = 0x1, + SEC_PRP = 0x2, +}; + +struct sec_sqe_type2 { + + /* + * mac_len: 0~5 bits + * a_key_len: 6~10 bits + * a_alg: 11~16 bits + */ + __le32 mac_key_alg; + + /* + * c_icv_len: 0~5 bits + * c_width: 6~8 bits + * c_key_len: 9~11 bits + * c_mode: 12~15 bits + */ + __le16 icvw_kmode; + + /* c_alg: 0~3 bits */ + __u8 c_alg; + __u8 rsvd4; + + /* + * a_len: 0~23 bits + * iv_offset_l: 24~31 bits + */ + __le32 alen_ivllen; + + /* + * c_len: 0~23 bits + * iv_offset_h: 24~31 bits + */ + __le32 clen_ivhlen; + + __le16 auth_src_offset; + __le16 cipher_src_offset; + __le16 cs_ip_header_offset; + __le16 cs_udp_header_offset; + __le16 pass_word_len; + __le16 dk_len; + __u8 salt3; + __u8 salt2; + __u8 salt1; + __u8 salt0; + + __le16 tag; + __le16 rsvd5; + + /* + * c_pad_type: 0~3 bits + * c_pad_len: 4~11 bits + * c_pad_data_type: 12~15 bits + */ + __le16 cph_pad; + + /* c_pad_len_field: 0~1 bits */ + __le16 c_pad_len_field; + + + __le64 long_a_data_len; + __le64 a_ivin_addr; + __le64 a_key_addr; + __le64 mac_addr; + __le64 c_ivin_addr; + __le64 c_key_addr; + + __le64 data_src_addr; + __le64 data_dst_addr; + + /* + * done: 0 bit + * icv: 1~3 bits + * csc: 4~6 bits + * flag: 7-10 bits + * dif_check: 11~13 bits + */ + __le16 done_flag; + + __u8 error_type; + __u8 warning_type; + __u8 mac_i3; + __u8 mac_i2; + __u8 mac_i1; + __u8 mac_i0; + __le16 check_sum_i; + __u8 tls_pad_len_i; + __u8 rsvd12; + __le32 counter; +}; + +struct sec_sqe { + /* + * type: 0~3 bits + * cipher: 4~5 bits + * auth: 6~7 bit s + */ + __u8 type_cipher_auth; + + /* + * seq: 0 bit + * de: 1~2 bits + * scene: 3~6 bits + * src_addr_type: ~7 bit, with sdm_addr_type 0-1 bits + */ + __u8 sds_sa_type; + + /* + * src_addr_type: 0~1 bits, not used now, + * if support PRP, set this field, or set zero. + * dst_addr_type: 2~4 bits + * mac_addr_type: 5~7 bits + */ + __u8 sdm_addr_type; + __u8 rsvd0; + + /* + * nonce_len(type2): 0~3 bits + * huk(type2): 4 bit + * key_s(type2): 5 bit + * ci_gen: 6~7 bits + */ + __u8 huk_key_ci; + + /* + * ai_gen: 0~1 bits + * a_pad(type2): 2~3 bits + * c_s(type2): 4~5 bits + */ + __u8 ai_apd_cs; + + /* + * rhf(type2): 0 bit + * c_key_type: 1~2 bits + * a_key_type: 3~4 bits + * write_frame_len(type2): 5~7 bits + */ + __u8 rca_key_frm; + + /* + * cal_iv_addr_en(type2): 0 bit + * tls_up(type2): 1 bit + * inveld: 7 bit + */ + __u8 iv_tls_ld; + + /* Just using type2 BD now */ + struct sec_sqe_type2 type2; +}; + +int sec_register_to_crypto(void); +void sec_unregister_from_crypto(void); +#endif diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c new file mode 100644 index 000000000000..74f0654028c9 --- /dev/null +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -0,0 +1,1095 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 HiSilicon Limited. 
*/ + +#include <linux/acpi.h> +#include <linux/aer.h> +#include <linux/bitops.h> +#include <linux/debugfs.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/seq_file.h> +#include <linux/topology.h> + +#include "sec.h" + +#define SEC_VF_NUM 63 +#define SEC_QUEUE_NUM_V1 4096 +#define SEC_QUEUE_NUM_V2 1024 +#define SEC_PF_PCI_DEVICE_ID 0xa255 +#define SEC_VF_PCI_DEVICE_ID 0xa256 + +#define SEC_XTS_MIV_ENABLE_REG 0x301384 +#define SEC_XTS_MIV_ENABLE_MSK 0x7FFFFFFF +#define SEC_XTS_MIV_DISABLE_MSK 0xFFFFFFFF +#define SEC_BD_ERR_CHK_EN1 0xfffff7fd +#define SEC_BD_ERR_CHK_EN2 0xffffbfff + +#define SEC_SQE_SIZE 128 +#define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH) +#define SEC_PF_DEF_Q_NUM 64 +#define SEC_PF_DEF_Q_BASE 0 +#define SEC_CTX_Q_NUM_DEF 24 + +#define SEC_CTRL_CNT_CLR_CE 0x301120 +#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0) +#define SEC_ENGINE_PF_CFG_OFF 0x300000 +#define SEC_ACC_COMMON_REG_OFF 0x1000 +#define SEC_CORE_INT_SOURCE 0x301010 +#define SEC_CORE_INT_MASK 0x301000 +#define SEC_CORE_INT_STATUS 0x301008 +#define SEC_CORE_SRAM_ECC_ERR_INFO 0x301C14 +#define SEC_ECC_NUM(err) (((err) >> 16) & 0xFF) +#define SEC_ECC_ADDR(err) ((err) >> 0) +#define SEC_CORE_INT_DISABLE 0x0 +#define SEC_CORE_INT_ENABLE 0x1ff + +#define SEC_RAS_CE_REG 0x50 +#define SEC_RAS_FE_REG 0x54 +#define SEC_RAS_NFE_REG 0x58 +#define SEC_RAS_CE_ENB_MSK 0x88 +#define SEC_RAS_FE_ENB_MSK 0x0 +#define SEC_RAS_NFE_ENB_MSK 0x177 +#define SEC_RAS_DISABLE 0x0 +#define SEC_MEM_START_INIT_REG 0x0100 +#define SEC_MEM_INIT_DONE_REG 0x0104 +#define SEC_QM_ABNORMAL_INT_MASK 0x100004 + +#define SEC_CONTROL_REG 0x0200 +#define SEC_TRNG_EN_SHIFT 8 +#define SEC_CLK_GATE_ENABLE BIT(3) +#define SEC_CLK_GATE_DISABLE (~BIT(3)) +#define SEC_AXI_SHUTDOWN_ENABLE BIT(12) +#define SEC_AXI_SHUTDOWN_DISABLE 0xFFFFEFFF + +#define SEC_INTERFACE_USER_CTRL0_REG 0x0220 +#define SEC_INTERFACE_USER_CTRL1_REG 0x0224 +#define SEC_BD_ERR_CHK_EN_REG1 0x0384 +#define SEC_BD_ERR_CHK_EN_REG2 0x038c + +#define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15)) +#define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7)) +#define SEC_CORE_INT_STATUS_M_ECC BIT(2) + +#define SEC_DELAY_10_US 10 +#define SEC_POLL_TIMEOUT_US 1000 +#define SEC_VF_CNT_MASK 0xffffffc0 +#define SEC_DBGFS_VAL_MAX_LEN 20 + +#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \ + SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF) + +struct sec_hw_error { + u32 int_msk; + const char *msg; +}; + +static const char sec_name[] = "hisi_sec2"; +static struct dentry *sec_debugfs_root; +static LIST_HEAD(sec_list); +static DEFINE_MUTEX(sec_list_lock); + +static const struct sec_hw_error sec_hw_errors[] = { + {.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"}, + {.int_msk = BIT(1), .msg = "sec_axi_bresp_err_rint"}, + {.int_msk = BIT(2), .msg = "sec_ecc_2bit_err_rint"}, + {.int_msk = BIT(3), .msg = "sec_ecc_1bit_err_rint"}, + {.int_msk = BIT(4), .msg = "sec_req_trng_timeout_rint"}, + {.int_msk = BIT(5), .msg = "sec_fsm_hbeat_rint"}, + {.int_msk = BIT(6), .msg = "sec_channel_req_rng_timeout_rint"}, + {.int_msk = BIT(7), .msg = "sec_bd_err_rint"}, + {.int_msk = BIT(8), .msg = "sec_chain_buff_err_rint"}, + { /* sentinel */ } +}; + +struct sec_dev *sec_find_device(int node) +{ +#define SEC_NUMA_MAX_DISTANCE 100 + int min_distance = SEC_NUMA_MAX_DISTANCE; + int dev_node = 0, free_qp_num = 0; + struct sec_dev *sec, *ret = NULL; + struct hisi_qm *qm; + struct device *dev; + + mutex_lock(&sec_list_lock); + 
list_for_each_entry(sec, &sec_list, list) { + qm = &sec->qm; + dev = &qm->pdev->dev; +#ifdef CONFIG_NUMA + dev_node = dev->numa_node; + if (dev_node < 0) + dev_node = 0; +#endif + if (node_distance(dev_node, node) < min_distance) { + free_qp_num = hisi_qm_get_free_qp_num(qm); + if (free_qp_num >= sec->ctx_q_num) { + ret = sec; + min_distance = node_distance(dev_node, node); + } + } + } + mutex_unlock(&sec_list_lock); + + return ret; +} + +static const char * const sec_dbg_file_name[] = { + [SEC_CURRENT_QM] = "current_qm", + [SEC_CLEAR_ENABLE] = "clear_enable", +}; + +static struct debugfs_reg32 sec_dfx_regs[] = { + {"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010}, + {"SEC_SAA_EN ", 0x301270}, + {"SEC_BD_LATENCY_MIN ", 0x301600}, + {"SEC_BD_LATENCY_MAX ", 0x301608}, + {"SEC_BD_LATENCY_AVG ", 0x30160C}, + {"SEC_BD_NUM_IN_SAA0 ", 0x301670}, + {"SEC_BD_NUM_IN_SAA1 ", 0x301674}, + {"SEC_BD_NUM_IN_SEC ", 0x301680}, + {"SEC_ECC_1BIT_CNT ", 0x301C00}, + {"SEC_ECC_1BIT_INFO ", 0x301C04}, + {"SEC_ECC_2BIT_CNT ", 0x301C10}, + {"SEC_ECC_2BIT_INFO ", 0x301C14}, + {"SEC_BD_SAA0 ", 0x301C20}, + {"SEC_BD_SAA1 ", 0x301C24}, + {"SEC_BD_SAA2 ", 0x301C28}, + {"SEC_BD_SAA3 ", 0x301C2C}, + {"SEC_BD_SAA4 ", 0x301C30}, + {"SEC_BD_SAA5 ", 0x301C34}, + {"SEC_BD_SAA6 ", 0x301C38}, + {"SEC_BD_SAA7 ", 0x301C3C}, + {"SEC_BD_SAA8 ", 0x301C40}, +}; + +static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp) +{ + struct pci_dev *pdev; + u32 n, q_num; + u8 rev_id; + int ret; + + if (!val) + return -EINVAL; + + pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, + SEC_PF_PCI_DEVICE_ID, NULL); + if (!pdev) { + q_num = min_t(u32, SEC_QUEUE_NUM_V1, SEC_QUEUE_NUM_V2); + pr_info("No device, suppose queue number is %d!\n", q_num); + } else { + rev_id = pdev->revision; + + switch (rev_id) { + case QM_HW_V1: + q_num = SEC_QUEUE_NUM_V1; + break; + case QM_HW_V2: + q_num = SEC_QUEUE_NUM_V2; + break; + default: + return -EINVAL; + } + } + + ret = kstrtou32(val, 10, &n); + if (ret || !n || n > q_num) + return -EINVAL; + + return param_set_int(val, kp); +} + +static const struct kernel_param_ops sec_pf_q_num_ops = { + .set = sec_pf_q_num_set, + .get = param_get_int, +}; +static u32 pf_q_num = SEC_PF_DEF_Q_NUM; +module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444); +MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 0-4096, v2 0-1024)"); + +static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp) +{ + u32 ctx_q_num; + int ret; + + if (!val) + return -EINVAL; + + ret = kstrtou32(val, 10, &ctx_q_num); + if (ret) + return -EINVAL; + + if (!ctx_q_num || ctx_q_num > QM_Q_DEPTH || ctx_q_num & 0x1) { + pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num); + return -EINVAL; + } + + return param_set_int(val, kp); +} + +static const struct kernel_param_ops sec_ctx_q_num_ops = { + .set = sec_ctx_q_num_set, + .get = param_get_int, +}; +static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF; +module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444); +MODULE_PARM_DESC(ctx_q_num, "Number of queue in ctx (2, 4, 6, ..., 1024)"); + +static const struct pci_device_id sec_dev_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, sec_dev_ids); + +static inline void sec_add_to_list(struct sec_dev *sec) +{ + mutex_lock(&sec_list_lock); + list_add_tail(&sec->list, &sec_list); + mutex_unlock(&sec_list_lock); +} + +static inline void sec_remove_from_list(struct sec_dev *sec) +{ + mutex_lock(&sec_list_lock); + 
list_del(&sec->list); + mutex_unlock(&sec_list_lock); +} + +static u8 sec_get_endian(struct sec_dev *sec) +{ + struct hisi_qm *qm = &sec->qm; + u32 reg; + + /* + * As for VF, it is a wrong way to get endian setting by + * reading a register of the engine + */ + if (qm->pdev->is_virtfn) { + dev_err_ratelimited(&qm->pdev->dev, + "cannot access a register in VF!\n"); + return SEC_LE; + } + reg = readl_relaxed(qm->io_base + SEC_ENGINE_PF_CFG_OFF + + SEC_ACC_COMMON_REG_OFF + SEC_CONTROL_REG); + + /* BD little endian mode */ + if (!(reg & BIT(0))) + return SEC_LE; + + /* BD 32-bits big endian mode */ + else if (!(reg & BIT(1))) + return SEC_32BE; + + /* BD 64-bits big endian mode */ + else + return SEC_64BE; +} + +static int sec_engine_init(struct sec_dev *sec) +{ + struct hisi_qm *qm = &sec->qm; + int ret; + u32 reg; + + /* disable clock gate control */ + reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); + reg &= SEC_CLK_GATE_DISABLE; + writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); + + writel_relaxed(0x1, SEC_ADDR(qm, SEC_MEM_START_INIT_REG)); + + ret = readl_relaxed_poll_timeout(SEC_ADDR(qm, SEC_MEM_INIT_DONE_REG), + reg, reg & 0x1, SEC_DELAY_10_US, + SEC_POLL_TIMEOUT_US); + if (ret) { + dev_err(&qm->pdev->dev, "fail to init sec mem\n"); + return ret; + } + + reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); + reg |= (0x1 << SEC_TRNG_EN_SHIFT); + writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); + + reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG)); + reg |= SEC_USER0_SMMU_NORMAL; + writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG)); + + reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG)); + reg |= SEC_USER1_SMMU_NORMAL; + writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG)); + + writel_relaxed(SEC_BD_ERR_CHK_EN1, + SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG1)); + writel_relaxed(SEC_BD_ERR_CHK_EN2, + SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG2)); + + /* enable clock gate control */ + reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); + reg |= SEC_CLK_GATE_ENABLE; + writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); + + /* config endian */ + reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); + reg |= sec_get_endian(sec); + writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); + + /* Enable sm4 xts mode multiple iv */ + writel_relaxed(SEC_XTS_MIV_ENABLE_MSK, + qm->io_base + SEC_XTS_MIV_ENABLE_REG); + + return 0; +} + +static int sec_set_user_domain_and_cache(struct sec_dev *sec) +{ + struct hisi_qm *qm = &sec->qm; + + /* qm user domain */ + writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1); + writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE); + writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1); + writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE); + writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE); + + /* qm cache */ + writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG); + writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE); + + /* disable FLR triggered by BME(bus master enable) */ + writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG); + writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE); + + /* enable sqc,cqc writeback */ + writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE | + CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) | + FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL); + + return sec_engine_init(sec); +} + +/* sec_debug_regs_clear() - clear the sec debug regs */ +static void sec_debug_regs_clear(struct hisi_qm *qm) +{ + /* clear 
current_qm */ + writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF); + writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF); + + /* clear rdclr_en */ + writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE); + + hisi_qm_debug_regs_clear(qm); +} + +static void sec_hw_error_enable(struct sec_dev *sec) +{ + struct hisi_qm *qm = &sec->qm; + u32 val; + + if (qm->ver == QM_HW_V1) { + writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK); + dev_info(&qm->pdev->dev, "V1 not support hw error handle\n"); + return; + } + + val = readl(qm->io_base + SEC_CONTROL_REG); + + /* clear SEC hw error source if having */ + writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_SOURCE); + + /* enable SEC hw error interrupts */ + writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK); + + /* enable RAS int */ + writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG); + writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG); + writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG); + + /* enable SEC block master OOO when m-bit error occur */ + val = val | SEC_AXI_SHUTDOWN_ENABLE; + + writel(val, qm->io_base + SEC_CONTROL_REG); +} + +static void sec_hw_error_disable(struct sec_dev *sec) +{ + struct hisi_qm *qm = &sec->qm; + u32 val; + + val = readl(qm->io_base + SEC_CONTROL_REG); + + /* disable RAS int */ + writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG); + writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG); + writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG); + + /* disable SEC hw error interrupts */ + writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK); + + /* disable SEC block master OOO when m-bit error occur */ + val = val & SEC_AXI_SHUTDOWN_DISABLE; + + writel(val, qm->io_base + SEC_CONTROL_REG); +} + +static void sec_hw_error_init(struct sec_dev *sec) +{ + if (sec->qm.fun_type == QM_HW_VF) + return; + + hisi_qm_hw_error_init(&sec->qm, QM_BASE_CE, + QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT + | QM_ACC_WB_NOT_READY_TIMEOUT, 0, + QM_DB_RANDOM_INVALID); + sec_hw_error_enable(sec); +} + +static void sec_hw_error_uninit(struct sec_dev *sec) +{ + if (sec->qm.fun_type == QM_HW_VF) + return; + + sec_hw_error_disable(sec); + writel(GENMASK(12, 0), sec->qm.io_base + SEC_QM_ABNORMAL_INT_MASK); +} + +static u32 sec_current_qm_read(struct sec_debug_file *file) +{ + struct hisi_qm *qm = file->qm; + + return readl(qm->io_base + QM_DFX_MB_CNT_VF); +} + +static int sec_current_qm_write(struct sec_debug_file *file, u32 val) +{ + struct hisi_qm *qm = file->qm; + struct sec_dev *sec = container_of(qm, struct sec_dev, qm); + u32 vfq_num; + u32 tmp; + + if (val > sec->num_vfs) + return -EINVAL; + + /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */ + if (!val) { + qm->debug.curr_qm_qp_num = qm->qp_num; + } else { + vfq_num = (qm->ctrl_qp_num - qm->qp_num) / sec->num_vfs; + + if (val == sec->num_vfs) + qm->debug.curr_qm_qp_num = + qm->ctrl_qp_num - qm->qp_num - + (sec->num_vfs - 1) * vfq_num; + else + qm->debug.curr_qm_qp_num = vfq_num; + } + + writel(val, qm->io_base + QM_DFX_MB_CNT_VF); + writel(val, qm->io_base + QM_DFX_DB_CNT_VF); + + tmp = val | + (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK); + writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); + + tmp = val | + (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK); + writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); + + return 0; +} + +static u32 sec_clear_enable_read(struct sec_debug_file *file) +{ + struct hisi_qm *qm = file->qm; + + return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) & + SEC_CTRL_CNT_CLR_CE_BIT; +} 
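The pf_q_num and ctx_q_num hooks earlier in sec_main.c follow the kernel's standard pattern for validating a module parameter before it is stored: a custom kernel_param_ops .set callback that parses and range-checks the string, then defers to param_set_int(). A minimal, self-contained sketch of that pattern follows; the names example_q_num, example_q_num_set and the EXAMPLE_Q_NUM_MAX bound are hypothetical stand-ins, not identifiers from this driver.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

/* Hypothetical bound; the real driver derives the limit from the HW revision. */
#define EXAMPLE_Q_NUM_MAX	1024

static int example_q_num_set(const char *val, const struct kernel_param *kp)
{
	u32 n;
	int ret;

	if (!val)
		return -EINVAL;

	/* Parse and range-check before letting param_set_int() store the value. */
	ret = kstrtou32(val, 10, &n);
	if (ret || !n || n > EXAMPLE_Q_NUM_MAX)
		return -EINVAL;

	return param_set_int(val, kp);
}

static const struct kernel_param_ops example_q_num_ops = {
	.set = example_q_num_set,
	.get = param_get_int,
};

static u32 example_q_num = 256;
module_param_cb(example_q_num, &example_q_num_ops, &example_q_num, 0444);
MODULE_PARM_DESC(example_q_num, "Number of queues (1-1024)");

With module_param_cb() an out-of-range value (for example 0, or anything above the bound) is rejected at load time instead of being silently accepted, which is why the SEC and ZIP drivers use this form rather than a plain module_param().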
+ +static int sec_clear_enable_write(struct sec_debug_file *file, u32 val) +{ + struct hisi_qm *qm = file->qm; + u32 tmp; + + if (val != 1 && val) + return -EINVAL; + + tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) & + ~SEC_CTRL_CNT_CLR_CE_BIT) | val; + writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE); + + return 0; +} + +static ssize_t sec_debug_read(struct file *filp, char __user *buf, + size_t count, loff_t *pos) +{ + struct sec_debug_file *file = filp->private_data; + char tbuf[SEC_DBGFS_VAL_MAX_LEN]; + u32 val; + int ret; + + spin_lock_irq(&file->lock); + + switch (file->index) { + case SEC_CURRENT_QM: + val = sec_current_qm_read(file); + break; + case SEC_CLEAR_ENABLE: + val = sec_clear_enable_read(file); + break; + default: + spin_unlock_irq(&file->lock); + return -EINVAL; + } + + spin_unlock_irq(&file->lock); + ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val); + + return simple_read_from_buffer(buf, count, pos, tbuf, ret); +} + +static ssize_t sec_debug_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct sec_debug_file *file = filp->private_data; + char tbuf[SEC_DBGFS_VAL_MAX_LEN]; + unsigned long val; + int len, ret; + + if (*pos != 0) + return 0; + + if (count >= SEC_DBGFS_VAL_MAX_LEN) + return -ENOSPC; + + len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1, + pos, buf, count); + if (len < 0) + return len; + + tbuf[len] = '\0'; + if (kstrtoul(tbuf, 0, &val)) + return -EFAULT; + + spin_lock_irq(&file->lock); + + switch (file->index) { + case SEC_CURRENT_QM: + ret = sec_current_qm_write(file, val); + if (ret) + goto err_input; + break; + case SEC_CLEAR_ENABLE: + ret = sec_clear_enable_write(file, val); + if (ret) + goto err_input; + break; + default: + ret = -EINVAL; + goto err_input; + } + + spin_unlock_irq(&file->lock); + + return count; + + err_input: + spin_unlock_irq(&file->lock); + return ret; +} + +static const struct file_operations sec_dbg_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = sec_debug_read, + .write = sec_debug_write, +}; + +static int sec_core_debug_init(struct sec_dev *sec) +{ + struct hisi_qm *qm = &sec->qm; + struct device *dev = &qm->pdev->dev; + struct sec_dfx *dfx = &sec->debug.dfx; + struct debugfs_regset32 *regset; + struct dentry *tmp_d; + + tmp_d = debugfs_create_dir("sec_dfx", sec->qm.debug.debug_root); + + regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); + if (!regset) + return -ENOENT; + + regset->regs = sec_dfx_regs; + regset->nregs = ARRAY_SIZE(sec_dfx_regs); + regset->base = qm->io_base; + + debugfs_create_regset32("regs", 0444, tmp_d, regset); + + debugfs_create_u64("send_cnt", 0444, tmp_d, &dfx->send_cnt); + + debugfs_create_u64("recv_cnt", 0444, tmp_d, &dfx->recv_cnt); + + return 0; +} + +static int sec_debug_init(struct sec_dev *sec) +{ + int i; + + for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) { + spin_lock_init(&sec->debug.files[i].lock); + sec->debug.files[i].index = i; + sec->debug.files[i].qm = &sec->qm; + + debugfs_create_file(sec_dbg_file_name[i], 0600, + sec->qm.debug.debug_root, + sec->debug.files + i, + &sec_dbg_fops); + } + + return sec_core_debug_init(sec); +} + +static int sec_debugfs_init(struct sec_dev *sec) +{ + struct hisi_qm *qm = &sec->qm; + struct device *dev = &qm->pdev->dev; + int ret; + + qm->debug.debug_root = debugfs_create_dir(dev_name(dev), + sec_debugfs_root); + ret = hisi_qm_debug_init(qm); + if (ret) + goto failed_to_create; + + if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) { + ret = sec_debug_init(sec); + if (ret) + goto 
failed_to_create; + } + + return 0; + +failed_to_create: + debugfs_remove_recursive(sec_debugfs_root); + + return ret; +} + +static void sec_debugfs_exit(struct sec_dev *sec) +{ + debugfs_remove_recursive(sec->qm.debug.debug_root); +} + +static int sec_pf_probe_init(struct sec_dev *sec) +{ + struct hisi_qm *qm = &sec->qm; + int ret; + + switch (qm->ver) { + case QM_HW_V1: + qm->ctrl_qp_num = SEC_QUEUE_NUM_V1; + break; + + case QM_HW_V2: + qm->ctrl_qp_num = SEC_QUEUE_NUM_V2; + break; + + default: + return -EINVAL; + } + + ret = sec_set_user_domain_and_cache(sec); + if (ret) + return ret; + + sec_hw_error_init(sec); + sec_debug_regs_clear(qm); + + return 0; +} + +static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) +{ + enum qm_hw_ver rev_id; + + rev_id = hisi_qm_get_hw_version(pdev); + if (rev_id == QM_HW_UNKNOWN) + return -ENODEV; + + qm->pdev = pdev; + qm->ver = rev_id; + + qm->sqe_size = SEC_SQE_SIZE; + qm->dev_name = sec_name; + qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ? + QM_HW_PF : QM_HW_VF; + qm->use_dma_api = true; + + return hisi_qm_init(qm); +} + +static void sec_qm_uninit(struct hisi_qm *qm) +{ + hisi_qm_uninit(qm); +} + +static int sec_probe_init(struct hisi_qm *qm, struct sec_dev *sec) +{ + if (qm->fun_type == QM_HW_PF) { + qm->qp_base = SEC_PF_DEF_Q_BASE; + qm->qp_num = pf_q_num; + qm->debug.curr_qm_qp_num = pf_q_num; + + return sec_pf_probe_init(sec); + } else if (qm->fun_type == QM_HW_VF) { + /* + * have no way to get qm configure in VM in v1 hardware, + * so currently force PF to uses SEC_PF_DEF_Q_NUM, and force + * to trigger only one VF in v1 hardware. + * v2 hardware has no such problem. + */ + if (qm->ver == QM_HW_V1) { + qm->qp_base = SEC_PF_DEF_Q_NUM; + qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM; + } else if (qm->ver == QM_HW_V2) { + /* v2 starts to support get vft by mailbox */ + return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); + } + } else { + return -ENODEV; + } + + return 0; +} + +static void sec_probe_uninit(struct sec_dev *sec) +{ + sec_hw_error_uninit(sec); +} + +static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct sec_dev *sec; + struct hisi_qm *qm; + int ret; + + sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL); + if (!sec) + return -ENOMEM; + + pci_set_drvdata(pdev, sec); + + sec->ctx_q_num = ctx_q_num; + + qm = &sec->qm; + + ret = sec_qm_init(qm, pdev); + if (ret) { + pci_err(pdev, "Failed to pre init qm!\n"); + return ret; + } + + ret = sec_probe_init(qm, sec); + if (ret) { + pci_err(pdev, "Failed to probe!\n"); + goto err_qm_uninit; + } + + ret = hisi_qm_start(qm); + if (ret) { + pci_err(pdev, "Failed to start sec qm!\n"); + goto err_probe_uninit; + } + + ret = sec_debugfs_init(sec); + if (ret) + pci_warn(pdev, "Failed to init debugfs!\n"); + + sec_add_to_list(sec); + + ret = sec_register_to_crypto(); + if (ret < 0) { + pr_err("Failed to register driver to crypto.\n"); + goto err_remove_from_list; + } + + return 0; + +err_remove_from_list: + sec_remove_from_list(sec); + sec_debugfs_exit(sec); + hisi_qm_stop(qm); + +err_probe_uninit: + sec_probe_uninit(sec); + +err_qm_uninit: + sec_qm_uninit(qm); + + return ret; +} + +/* now we only support equal assignment */ +static int sec_vf_q_assign(struct sec_dev *sec, u32 num_vfs) +{ + struct hisi_qm *qm = &sec->qm; + u32 qp_num = qm->qp_num; + u32 q_base = qp_num; + u32 q_num, remain_q_num; + int i, j, ret; + + if (!num_vfs) + return -EINVAL; + + remain_q_num = qm->ctrl_qp_num - qp_num; + q_num = remain_q_num / num_vfs; + + for (i = 1; i 
<= num_vfs; i++) { + if (i == num_vfs) + q_num += remain_q_num % num_vfs; + ret = hisi_qm_set_vft(qm, i, q_base, q_num); + if (ret) { + for (j = i; j > 0; j--) + hisi_qm_set_vft(qm, j, 0, 0); + return ret; + } + q_base += q_num; + } + + return 0; +} + +static int sec_clear_vft_config(struct sec_dev *sec) +{ + struct hisi_qm *qm = &sec->qm; + u32 num_vfs = sec->num_vfs; + int ret; + u32 i; + + for (i = 1; i <= num_vfs; i++) { + ret = hisi_qm_set_vft(qm, i, 0, 0); + if (ret) + return ret; + } + + sec->num_vfs = 0; + + return 0; +} + +static int sec_sriov_enable(struct pci_dev *pdev, int max_vfs) +{ + struct sec_dev *sec = pci_get_drvdata(pdev); + int pre_existing_vfs, ret; + u32 num_vfs; + + pre_existing_vfs = pci_num_vf(pdev); + + if (pre_existing_vfs) { + pci_err(pdev, "Can't enable VF. Please disable at first!\n"); + return 0; + } + + num_vfs = min_t(u32, max_vfs, SEC_VF_NUM); + + ret = sec_vf_q_assign(sec, num_vfs); + if (ret) { + pci_err(pdev, "Can't assign queues for VF!\n"); + return ret; + } + + sec->num_vfs = num_vfs; + + ret = pci_enable_sriov(pdev, num_vfs); + if (ret) { + pci_err(pdev, "Can't enable VF!\n"); + sec_clear_vft_config(sec); + return ret; + } + + return num_vfs; +} + +static int sec_sriov_disable(struct pci_dev *pdev) +{ + struct sec_dev *sec = pci_get_drvdata(pdev); + + if (pci_vfs_assigned(pdev)) { + pci_err(pdev, "Can't disable VFs while VFs are assigned!\n"); + return -EPERM; + } + + /* remove in sec_pci_driver will be called to free VF resources */ + pci_disable_sriov(pdev); + + return sec_clear_vft_config(sec); +} + +static int sec_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + if (num_vfs) + return sec_sriov_enable(pdev, num_vfs); + else + return sec_sriov_disable(pdev); +} + +static void sec_remove(struct pci_dev *pdev) +{ + struct sec_dev *sec = pci_get_drvdata(pdev); + struct hisi_qm *qm = &sec->qm; + + sec_unregister_from_crypto(); + + sec_remove_from_list(sec); + + if (qm->fun_type == QM_HW_PF && sec->num_vfs) + (void)sec_sriov_disable(pdev); + + sec_debugfs_exit(sec); + + (void)hisi_qm_stop(qm); + + if (qm->fun_type == QM_HW_PF) + sec_debug_regs_clear(qm); + + sec_probe_uninit(sec); + + sec_qm_uninit(qm); +} + +static void sec_log_hw_error(struct sec_dev *sec, u32 err_sts) +{ + const struct sec_hw_error *errs = sec_hw_errors; + struct device *dev = &sec->qm.pdev->dev; + u32 err_val; + + while (errs->msg) { + if (errs->int_msk & err_sts) { + dev_err(dev, "%s [error status=0x%x] found\n", + errs->msg, errs->int_msk); + + if (SEC_CORE_INT_STATUS_M_ECC & err_sts) { + err_val = readl(sec->qm.io_base + + SEC_CORE_SRAM_ECC_ERR_INFO); + dev_err(dev, "multi ecc sram num=0x%x\n", + SEC_ECC_NUM(err_val)); + dev_err(dev, "multi ecc sram addr=0x%x\n", + SEC_ECC_ADDR(err_val)); + } + } + errs++; + } +} + +static pci_ers_result_t sec_hw_error_handle(struct sec_dev *sec) +{ + u32 err_sts; + + /* read err sts */ + err_sts = readl(sec->qm.io_base + SEC_CORE_INT_STATUS); + if (err_sts) { + sec_log_hw_error(sec, err_sts); + + /* clear error interrupts */ + writel(err_sts, sec->qm.io_base + SEC_CORE_INT_SOURCE); + + return PCI_ERS_RESULT_NEED_RESET; + } + + return PCI_ERS_RESULT_RECOVERED; +} + +static pci_ers_result_t sec_process_hw_error(struct pci_dev *pdev) +{ + struct sec_dev *sec = pci_get_drvdata(pdev); + pci_ers_result_t qm_ret, sec_ret; + + if (!sec) { + pci_err(pdev, "Can't recover error during device init\n"); + return PCI_ERS_RESULT_NONE; + } + + /* log qm error */ + qm_ret = hisi_qm_hw_error_handle(&sec->qm); + + /* log sec error */ + sec_ret = 
sec_hw_error_handle(sec); + + return (qm_ret == PCI_ERS_RESULT_NEED_RESET || + sec_ret == PCI_ERS_RESULT_NEED_RESET) ? + PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED; +} + +static pci_ers_result_t sec_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + if (pdev->is_virtfn) + return PCI_ERS_RESULT_NONE; + + pci_info(pdev, "PCI error detected, state(=%d)!!\n", state); + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + return sec_process_hw_error(pdev); +} + +static const struct pci_error_handlers sec_err_handler = { + .error_detected = sec_error_detected, +}; + +static struct pci_driver sec_pci_driver = { + .name = "hisi_sec2", + .id_table = sec_dev_ids, + .probe = sec_probe, + .remove = sec_remove, + .err_handler = &sec_err_handler, + .sriov_configure = sec_sriov_configure, +}; + +static void sec_register_debugfs(void) +{ + if (!debugfs_initialized()) + return; + + sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL); +} + +static void sec_unregister_debugfs(void) +{ + debugfs_remove_recursive(sec_debugfs_root); +} + +static int __init sec_init(void) +{ + int ret; + + sec_register_debugfs(); + + ret = pci_register_driver(&sec_pci_driver); + if (ret < 0) { + sec_unregister_debugfs(); + pr_err("Failed to register pci driver.\n"); + return ret; + } + + return 0; +} + +static void __exit sec_exit(void) +{ + pci_unregister_driver(&sec_pci_driver); + sec_unregister_debugfs(); +} + +module_init(sec_init); +module_exit(sec_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>"); +MODULE_AUTHOR("Longfang Liu <liulongfang@huawei.com>"); +MODULE_AUTHOR("Wei Zhang <zhangwei375@huawei.com>"); +MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator"); diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c index e083d172b618..012023c347b1 100644 --- a/drivers/crypto/hisilicon/sgl.c +++ b/drivers/crypto/hisilicon/sgl.c @@ -2,37 +2,13 @@ /* Copyright (c) 2019 HiSilicon Limited. */ #include <linux/dma-mapping.h> #include <linux/module.h> -#include "./sgl.h" +#include <linux/slab.h> +#include "qm.h" #define HISI_ACC_SGL_SGE_NR_MIN 1 -#define HISI_ACC_SGL_SGE_NR_MAX 255 -#define HISI_ACC_SGL_SGE_NR_DEF 10 #define HISI_ACC_SGL_NR_MAX 256 #define HISI_ACC_SGL_ALIGN_SIZE 64 - -static int acc_sgl_sge_set(const char *val, const struct kernel_param *kp) -{ - int ret; - u32 n; - - if (!val) - return -EINVAL; - - ret = kstrtou32(val, 10, &n); - if (ret != 0 || n > HISI_ACC_SGL_SGE_NR_MAX || n == 0) - return -EINVAL; - - return param_set_int(val, kp); -} - -static const struct kernel_param_ops acc_sgl_sge_ops = { - .set = acc_sgl_sge_set, - .get = param_get_int, -}; - -static u32 acc_sgl_sge_nr = HISI_ACC_SGL_SGE_NR_DEF; -module_param_cb(acc_sgl_sge_nr, &acc_sgl_sge_ops, &acc_sgl_sge_nr, 0444); -MODULE_PARM_DESC(acc_sgl_sge_nr, "Number of sge in sgl(1-255)"); +#define HISI_ACC_MEM_BLOCK_NR 5 struct acc_hw_sge { dma_addr_t buf; @@ -55,37 +31,91 @@ struct hisi_acc_hw_sgl { struct acc_hw_sge sge_entries[]; } __aligned(1); +struct hisi_acc_sgl_pool { + struct mem_block { + struct hisi_acc_hw_sgl *sgl; + dma_addr_t sgl_dma; + size_t size; + } mem_block[HISI_ACC_MEM_BLOCK_NR]; + u32 sgl_num_per_block; + u32 block_num; + u32 count; + u32 sge_nr; + size_t sgl_size; +}; + /** * hisi_acc_create_sgl_pool() - Create a hw sgl pool. * @dev: The device which hw sgl pool belongs to. - * @pool: Pointer of pool. * @count: Count of hisi_acc_hw_sgl in pool. 
+ * @sge_nr: The count of sge in hw_sgl * * This function creates a hw sgl pool, after this user can get hw sgl memory * from it. */ -int hisi_acc_create_sgl_pool(struct device *dev, - struct hisi_acc_sgl_pool *pool, u32 count) +struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev, + u32 count, u32 sge_nr) { - u32 sgl_size; - u32 size; + u32 sgl_size, block_size, sgl_num_per_block, block_num, remain_sgl = 0; + struct hisi_acc_sgl_pool *pool; + struct mem_block *block; + u32 i, j; - if (!dev || !pool || !count) - return -EINVAL; + if (!dev || !count || !sge_nr || sge_nr > HISI_ACC_SGL_SGE_NR_MAX) + return ERR_PTR(-EINVAL); - sgl_size = sizeof(struct acc_hw_sge) * acc_sgl_sge_nr + + sgl_size = sizeof(struct acc_hw_sge) * sge_nr + sizeof(struct hisi_acc_hw_sgl); - size = sgl_size * count; + block_size = PAGE_SIZE * (1 << (MAX_ORDER - 1)); + sgl_num_per_block = block_size / sgl_size; + block_num = count / sgl_num_per_block; + remain_sgl = count % sgl_num_per_block; + + if ((!remain_sgl && block_num > HISI_ACC_MEM_BLOCK_NR) || + (remain_sgl > 0 && block_num > HISI_ACC_MEM_BLOCK_NR - 1)) + return ERR_PTR(-EINVAL); + + pool = kzalloc(sizeof(*pool), GFP_KERNEL); + if (!pool) + return ERR_PTR(-ENOMEM); + block = pool->mem_block; + + for (i = 0; i < block_num; i++) { + block[i].sgl = dma_alloc_coherent(dev, block_size, + &block[i].sgl_dma, + GFP_KERNEL); + if (!block[i].sgl) + goto err_free_mem; + + block[i].size = block_size; + } + + if (remain_sgl > 0) { + block[i].sgl = dma_alloc_coherent(dev, remain_sgl * sgl_size, + &block[i].sgl_dma, + GFP_KERNEL); + if (!block[i].sgl) + goto err_free_mem; - pool->sgl = dma_alloc_coherent(dev, size, &pool->sgl_dma, GFP_KERNEL); - if (!pool->sgl) - return -ENOMEM; + block[i].size = remain_sgl * sgl_size; + } - pool->size = size; + pool->sgl_num_per_block = sgl_num_per_block; + pool->block_num = remain_sgl ? 
block_num + 1 : block_num; pool->count = count; pool->sgl_size = sgl_size; + pool->sge_nr = sge_nr; - return 0; + return pool; + +err_free_mem: + for (j = 0; j < i; j++) { + dma_free_coherent(dev, block_size, block[j].sgl, + block[j].sgl_dma); + memset(block + j, 0, sizeof(*block)); + } + kfree(pool); + return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool); @@ -98,38 +128,57 @@ EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool); */ void hisi_acc_free_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool) { - dma_free_coherent(dev, pool->size, pool->sgl, pool->sgl_dma); - memset(pool, 0, sizeof(struct hisi_acc_sgl_pool)); + struct mem_block *block; + int i; + + if (!dev || !pool) + return; + + block = pool->mem_block; + + for (i = 0; i < pool->block_num; i++) + dma_free_coherent(dev, block[i].size, block[i].sgl, + block[i].sgl_dma); + + kfree(pool); } EXPORT_SYMBOL_GPL(hisi_acc_free_sgl_pool); -struct hisi_acc_hw_sgl *acc_get_sgl(struct hisi_acc_sgl_pool *pool, u32 index, - dma_addr_t *hw_sgl_dma) +static struct hisi_acc_hw_sgl *acc_get_sgl(struct hisi_acc_sgl_pool *pool, + u32 index, dma_addr_t *hw_sgl_dma) { - if (!pool || !hw_sgl_dma || index >= pool->count || !pool->sgl) + struct mem_block *block; + u32 block_index, offset; + + if (!pool || !hw_sgl_dma || index >= pool->count) return ERR_PTR(-EINVAL); - *hw_sgl_dma = pool->sgl_dma + pool->sgl_size * index; - return (void *)pool->sgl + pool->sgl_size * index; -} + block = pool->mem_block; + block_index = index / pool->sgl_num_per_block; + offset = index % pool->sgl_num_per_block; -void acc_put_sgl(struct hisi_acc_sgl_pool *pool, u32 index) {} + *hw_sgl_dma = block[block_index].sgl_dma + pool->sgl_size * offset; + return (void *)block[block_index].sgl + pool->sgl_size * offset; +} static void sg_map_to_hw_sg(struct scatterlist *sgl, struct acc_hw_sge *hw_sge) { - hw_sge->buf = sgl->dma_address; - hw_sge->len = sgl->dma_length; + hw_sge->buf = sg_dma_address(sgl); + hw_sge->len = cpu_to_le32(sg_dma_len(sgl)); } static void inc_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl) { - hw_sgl->entry_sum_in_sgl++; + u16 var = le16_to_cpu(hw_sgl->entry_sum_in_sgl); + + var++; + hw_sgl->entry_sum_in_sgl = cpu_to_le16(var); } static void update_hw_sgl_sum_sge(struct hisi_acc_hw_sgl *hw_sgl, u16 sum) { - hw_sgl->entry_sum_in_chain = sum; + hw_sgl->entry_sum_in_chain = cpu_to_le16(sum); } /** @@ -153,10 +202,13 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, dma_addr_t curr_sgl_dma = 0; struct acc_hw_sge *curr_hw_sge; struct scatterlist *sg; - int sg_n = sg_nents(sgl); - int i, ret; + int i, ret, sg_n; - if (!dev || !sgl || !pool || !hw_sgl_dma || sg_n > acc_sgl_sge_nr) + if (!dev || !sgl || !pool || !hw_sgl_dma) + return ERR_PTR(-EINVAL); + + sg_n = sg_nents(sgl); + if (sg_n > pool->sge_nr) return ERR_PTR(-EINVAL); ret = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL); @@ -164,11 +216,12 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, return ERR_PTR(-EINVAL); curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma); - if (!curr_hw_sgl) { - ret = -ENOMEM; - goto err_unmap_sg; + if (IS_ERR(curr_hw_sgl)) { + dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL); + return ERR_PTR(-ENOMEM); + } - curr_hw_sgl->entry_length_in_sgl = acc_sgl_sge_nr; + curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr); curr_hw_sge = curr_hw_sgl->sge_entries; for_each_sg(sgl, sg, sg_n, i) { @@ -177,14 +230,10 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, curr_hw_sge++; } - update_hw_sgl_sum_sge(curr_hw_sgl, acc_sgl_sge_nr); + 
update_hw_sgl_sum_sge(curr_hw_sgl, pool->sge_nr); *hw_sgl_dma = curr_sgl_dma; return curr_hw_sgl; - -err_unmap_sg: - dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL); - return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_map_to_hw_sgl); @@ -201,6 +250,9 @@ EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_map_to_hw_sgl); void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl, struct hisi_acc_hw_sgl *hw_sgl) { + if (!dev || !sgl || !hw_sgl) + return; + dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_BIDIRECTIONAL); hw_sgl->entry_sum_in_chain = 0; diff --git a/drivers/crypto/hisilicon/sgl.h b/drivers/crypto/hisilicon/sgl.h deleted file mode 100644 index 3ac8871c7acf..000000000000 --- a/drivers/crypto/hisilicon/sgl.h +++ /dev/null @@ -1,24 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2019 HiSilicon Limited. */ -#ifndef HISI_ACC_SGL_H -#define HISI_ACC_SGL_H - -struct hisi_acc_sgl_pool { - struct hisi_acc_hw_sgl *sgl; - dma_addr_t sgl_dma; - size_t size; - u32 count; - size_t sgl_size; -}; - -struct hisi_acc_hw_sgl * -hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, - struct scatterlist *sgl, - struct hisi_acc_sgl_pool *pool, - u32 index, dma_addr_t *hw_sgl_dma); -void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl, - struct hisi_acc_hw_sgl *hw_sgl); -int hisi_acc_create_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool, - u32 count); -void hisi_acc_free_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool); -#endif diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h index ffb00d987d02..79fc4dd3fe00 100644 --- a/drivers/crypto/hisilicon/zip/zip.h +++ b/drivers/crypto/hisilicon/zip/zip.h @@ -8,7 +8,6 @@ #include <linux/list.h> #include "../qm.h" -#include "../sgl.h" /* hisi_zip_sqe dw3 */ #define HZIP_BD_STATUS_M GENMASK(7, 0) diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 59023545a1c4..795428c1d07e 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -22,6 +22,7 @@ #define HZIP_CTX_Q_NUM 2 #define HZIP_GZIP_HEAD_BUF 256 #define HZIP_ALG_PRIORITY 300 +#define HZIP_SGL_SGE_NR 10 static const u8 zlib_head[HZIP_ZLIB_HEAD_SIZE] = {0x78, 0x9c}; static const u8 gzip_head[HZIP_GZIP_HEAD_SIZE] = {0x1f, 0x8b, 0x08, 0x0, 0x0, @@ -41,7 +42,7 @@ enum hisi_zip_alg_type { #define TO_HEAD(req_type) \ (((req_type) == HZIP_ALG_TYPE_ZLIB) ? zlib_head : \ - ((req_type) == HZIP_ALG_TYPE_GZIP) ? gzip_head : 0) \ + ((req_type) == HZIP_ALG_TYPE_GZIP) ? 
gzip_head : NULL) \ struct hisi_zip_req { struct acomp_req *req; @@ -67,7 +68,7 @@ struct hisi_zip_qp_ctx { struct hisi_qp *qp; struct hisi_zip_sqe zip_sqe; struct hisi_zip_req_q req_q; - struct hisi_acc_sgl_pool sgl_pool; + struct hisi_acc_sgl_pool *sgl_pool; struct hisi_zip *zip_dev; struct hisi_zip_ctx *ctx; }; @@ -78,6 +79,30 @@ struct hisi_zip_ctx { struct hisi_zip_qp_ctx qp_ctx[HZIP_CTX_Q_NUM]; }; +static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp) +{ + int ret; + u16 n; + + if (!val) + return -EINVAL; + + ret = kstrtou16(val, 10, &n); + if (ret || n == 0 || n > HISI_ACC_SGL_SGE_NR_MAX) + return -EINVAL; + + return param_set_int(val, kp); +} + +static const struct kernel_param_ops sgl_sge_nr_ops = { + .set = sgl_sge_nr_set, + .get = param_get_int, +}; + +static u16 sgl_sge_nr = HZIP_SGL_SGE_NR; +module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444); +MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)"); + static void hisi_zip_config_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type) { u32 val; @@ -265,14 +290,15 @@ static void hisi_zip_release_req_q(struct hisi_zip_ctx *ctx) static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx) { struct hisi_zip_qp_ctx *tmp; - int i, ret; + struct device *dev; + int i; for (i = 0; i < HZIP_CTX_Q_NUM; i++) { tmp = &ctx->qp_ctx[i]; - ret = hisi_acc_create_sgl_pool(&tmp->qp->qm->pdev->dev, - &tmp->sgl_pool, - QM_Q_DEPTH << 1); - if (ret < 0) { + dev = &tmp->qp->qm->pdev->dev; + tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH << 1, + sgl_sge_nr); + if (IS_ERR(tmp->sgl_pool)) { if (i == 1) goto err_free_sgl_pool0; return -ENOMEM; @@ -283,7 +309,7 @@ static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx) err_free_sgl_pool0: hisi_acc_free_sgl_pool(&ctx->qp_ctx[QPC_COMP].qp->qm->pdev->dev, - &ctx->qp_ctx[QPC_COMP].sgl_pool); + ctx->qp_ctx[QPC_COMP].sgl_pool); return -ENOMEM; } @@ -293,7 +319,7 @@ static void hisi_zip_release_sgl_pool(struct hisi_zip_ctx *ctx) for (i = 0; i < HZIP_CTX_Q_NUM; i++) hisi_acc_free_sgl_pool(&ctx->qp_ctx[i].qp->qm->pdev->dev, - &ctx->qp_ctx[i].sgl_pool); + ctx->qp_ctx[i].sgl_pool); } static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx, @@ -512,7 +538,7 @@ static int hisi_zip_do_work(struct hisi_zip_req *req, struct hisi_zip_sqe *zip_sqe = &qp_ctx->zip_sqe; struct hisi_qp *qp = qp_ctx->qp; struct device *dev = &qp->qm->pdev->dev; - struct hisi_acc_sgl_pool *pool = &qp_ctx->sgl_pool; + struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool; dma_addr_t input; dma_addr_t output; int ret; diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 1b2ee96c888d..e1bab1a91333 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -79,47 +79,80 @@ #define HZIP_SOFT_CTRL_CNT_CLR_CE 0x301000 #define SOFT_CTRL_CNT_CLR_CE_BIT BIT(0) -#define HZIP_NUMA_DISTANCE 100 #define HZIP_BUF_SIZE 22 static const char hisi_zip_name[] = "hisi_zip"; static struct dentry *hzip_debugfs_root; -LIST_HEAD(hisi_zip_list); -DEFINE_MUTEX(hisi_zip_list_lock); +static LIST_HEAD(hisi_zip_list); +static DEFINE_MUTEX(hisi_zip_list_lock); -#ifdef CONFIG_NUMA -static struct hisi_zip *find_zip_device_numa(int node) +struct hisi_zip_resource { + struct hisi_zip *hzip; + int distance; + struct list_head list; +}; + +static void free_list(struct list_head *head) { - struct hisi_zip *zip = NULL; - struct hisi_zip *hisi_zip; - int min_distance = HZIP_NUMA_DISTANCE; - struct device *dev; + struct hisi_zip_resource *res, *tmp; 
- list_for_each_entry(hisi_zip, &hisi_zip_list, list) { - dev = &hisi_zip->qm.pdev->dev; - if (node_distance(dev->numa_node, node) < min_distance) { - zip = hisi_zip; - min_distance = node_distance(dev->numa_node, node); - } + list_for_each_entry_safe(res, tmp, head, list) { + list_del(&res->list); + kfree(res); } - - return zip; } -#endif struct hisi_zip *find_zip_device(int node) { - struct hisi_zip *zip = NULL; + struct hisi_zip_resource *res, *tmp; + struct hisi_zip *ret = NULL; + struct hisi_zip *hisi_zip; + struct list_head *n; + struct device *dev; + LIST_HEAD(head); mutex_lock(&hisi_zip_list_lock); -#ifdef CONFIG_NUMA - zip = find_zip_device_numa(node); -#else - zip = list_first_entry(&hisi_zip_list, struct hisi_zip, list); -#endif + + if (IS_ENABLED(CONFIG_NUMA)) { + list_for_each_entry(hisi_zip, &hisi_zip_list, list) { + res = kzalloc(sizeof(*res), GFP_KERNEL); + if (!res) + goto err; + + dev = &hisi_zip->qm.pdev->dev; + res->hzip = hisi_zip; + res->distance = node_distance(dev_to_node(dev), node); + + n = &head; + list_for_each_entry(tmp, &head, list) { + if (res->distance < tmp->distance) { + n = &tmp->list; + break; + } + } + list_add_tail(&res->list, n); + } + + list_for_each_entry(tmp, &head, list) { + if (hisi_qm_get_free_qp_num(&tmp->hzip->qm)) { + ret = tmp->hzip; + break; + } + } + + free_list(&head); + } else { + ret = list_first_entry(&hisi_zip_list, struct hisi_zip, list); + } + mutex_unlock(&hisi_zip_list_lock); - return zip; + return ret; + +err: + free_list(&head); + mutex_unlock(&hisi_zip_list_lock); + return NULL; } struct hisi_zip_hw_error { @@ -267,6 +300,10 @@ MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)"); static int uacce_mode; module_param(uacce_mode, int, 0); +static u32 vfs_num; +module_param(vfs_num, uint, 0444); +MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63)"); + static const struct pci_device_id hisi_zip_dev_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_PF) }, { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_VF) }, @@ -335,8 +372,7 @@ static void hisi_zip_hw_error_set_state(struct hisi_zip *hisi_zip, bool state) if (qm->ver == QM_HW_V1) { writel(HZIP_CORE_INT_DISABLE, qm->io_base + HZIP_CORE_INT_MASK); - dev_info(&qm->pdev->dev, "ZIP v%d does not support hw error handle\n", - qm->ver); + dev_info(&qm->pdev->dev, "Does not support hw error handle\n"); return; } @@ -511,7 +547,7 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl) struct hisi_qm *qm = &hisi_zip->qm; struct device *dev = &qm->pdev->dev; struct debugfs_regset32 *regset; - struct dentry *tmp_d, *tmp; + struct dentry *tmp_d; char buf[HZIP_BUF_SIZE]; int i; @@ -521,10 +557,6 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl) else sprintf(buf, "decomp_core%d", i - HZIP_COMP_CORE_NUM); - tmp_d = debugfs_create_dir(buf, ctrl->debug_root); - if (!tmp_d) - return -ENOENT; - regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); if (!regset) return -ENOENT; @@ -533,9 +565,8 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl) regset->nregs = ARRAY_SIZE(hzip_dfx_regs); regset->base = qm->io_base + core_offsets[i]; - tmp = debugfs_create_regset32("regs", 0444, tmp_d, regset); - if (!tmp) - return -ENOENT; + tmp_d = debugfs_create_dir(buf, ctrl->debug_root); + debugfs_create_regset32("regs", 0444, tmp_d, regset); } return 0; @@ -543,7 +574,6 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl) static int hisi_zip_ctrl_debug_init(struct hisi_zip_ctrl *ctrl) { - struct dentry *tmp; int 
i; for (i = HZIP_CURRENT_QM; i < HZIP_DEBUG_FILE_NUM; i++) { @@ -551,11 +581,9 @@ static int hisi_zip_ctrl_debug_init(struct hisi_zip_ctrl *ctrl) ctrl->files[i].ctrl = ctrl; ctrl->files[i].index = i; - tmp = debugfs_create_file(ctrl_debug_file_name[i], 0600, - ctrl->debug_root, ctrl->files + i, - &ctrl_debug_fops); - if (!tmp) - return -ENOENT; + debugfs_create_file(ctrl_debug_file_name[i], 0600, + ctrl->debug_root, ctrl->files + i, + &ctrl_debug_fops); } return hisi_zip_core_debug_init(ctrl); @@ -569,8 +597,6 @@ static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip) int ret; dev_d = debugfs_create_dir(dev_name(dev), hzip_debugfs_root); - if (!dev_d) - return -ENOENT; qm->debug.debug_root = dev_d; ret = hisi_qm_debug_init(qm); @@ -652,90 +678,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) return 0; } -static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) -{ - struct hisi_zip *hisi_zip; - enum qm_hw_ver rev_id; - struct hisi_qm *qm; - int ret; - - rev_id = hisi_qm_get_hw_version(pdev); - if (rev_id == QM_HW_UNKNOWN) - return -EINVAL; - - hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL); - if (!hisi_zip) - return -ENOMEM; - pci_set_drvdata(pdev, hisi_zip); - - qm = &hisi_zip->qm; - qm->pdev = pdev; - qm->ver = rev_id; - - qm->sqe_size = HZIP_SQE_SIZE; - qm->dev_name = hisi_zip_name; - qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? QM_HW_PF : - QM_HW_VF; - switch (uacce_mode) { - case 0: - qm->use_dma_api = true; - break; - case 1: - qm->use_dma_api = false; - break; - case 2: - qm->use_dma_api = true; - break; - default: - return -EINVAL; - } - - ret = hisi_qm_init(qm); - if (ret) { - dev_err(&pdev->dev, "Failed to init qm!\n"); - return ret; - } - - if (qm->fun_type == QM_HW_PF) { - ret = hisi_zip_pf_probe_init(hisi_zip); - if (ret) - return ret; - - qm->qp_base = HZIP_PF_DEF_Q_BASE; - qm->qp_num = pf_q_num; - } else if (qm->fun_type == QM_HW_VF) { - /* - * have no way to get qm configure in VM in v1 hardware, - * so currently force PF to uses HZIP_PF_DEF_Q_NUM, and force - * to trigger only one VF in v1 hardware. - * - * v2 hardware has no such problem. - */ - if (qm->ver == QM_HW_V1) { - qm->qp_base = HZIP_PF_DEF_Q_NUM; - qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM; - } else if (qm->ver == QM_HW_V2) - /* v2 starts to support get vft by mailbox */ - hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); - } - - ret = hisi_qm_start(qm); - if (ret) - goto err_qm_uninit; - - ret = hisi_zip_debugfs_init(hisi_zip); - if (ret) - dev_err(&pdev->dev, "Failed to init debugfs (%d)!\n", ret); - - hisi_zip_add_to_list(hisi_zip); - - return 0; - -err_qm_uninit: - hisi_qm_uninit(qm); - return ret; -} - /* Currently we only support equal assignment */ static int hisi_zip_vf_q_assign(struct hisi_zip *hisi_zip, int num_vfs) { @@ -832,6 +774,100 @@ static int hisi_zip_sriov_disable(struct pci_dev *pdev) return hisi_zip_clear_vft_config(hisi_zip); } +static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct hisi_zip *hisi_zip; + enum qm_hw_ver rev_id; + struct hisi_qm *qm; + int ret; + + rev_id = hisi_qm_get_hw_version(pdev); + if (rev_id == QM_HW_UNKNOWN) + return -EINVAL; + + hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL); + if (!hisi_zip) + return -ENOMEM; + pci_set_drvdata(pdev, hisi_zip); + + qm = &hisi_zip->qm; + qm->pdev = pdev; + qm->ver = rev_id; + + qm->sqe_size = HZIP_SQE_SIZE; + qm->dev_name = hisi_zip_name; + qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? 
QM_HW_PF : + QM_HW_VF; + switch (uacce_mode) { + case 0: + qm->use_dma_api = true; + break; + case 1: + qm->use_dma_api = false; + break; + case 2: + qm->use_dma_api = true; + break; + default: + return -EINVAL; + } + + ret = hisi_qm_init(qm); + if (ret) { + dev_err(&pdev->dev, "Failed to init qm!\n"); + return ret; + } + + if (qm->fun_type == QM_HW_PF) { + ret = hisi_zip_pf_probe_init(hisi_zip); + if (ret) + return ret; + + qm->qp_base = HZIP_PF_DEF_Q_BASE; + qm->qp_num = pf_q_num; + } else if (qm->fun_type == QM_HW_VF) { + /* + * have no way to get qm configure in VM in v1 hardware, + * so currently force PF to uses HZIP_PF_DEF_Q_NUM, and force + * to trigger only one VF in v1 hardware. + * + * v2 hardware has no such problem. + */ + if (qm->ver == QM_HW_V1) { + qm->qp_base = HZIP_PF_DEF_Q_NUM; + qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM; + } else if (qm->ver == QM_HW_V2) + /* v2 starts to support get vft by mailbox */ + hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); + } + + ret = hisi_qm_start(qm); + if (ret) + goto err_qm_uninit; + + ret = hisi_zip_debugfs_init(hisi_zip); + if (ret) + dev_err(&pdev->dev, "Failed to init debugfs (%d)!\n", ret); + + hisi_zip_add_to_list(hisi_zip); + + if (qm->fun_type == QM_HW_PF && vfs_num > 0) { + ret = hisi_zip_sriov_enable(pdev, vfs_num); + if (ret < 0) + goto err_remove_from_list; + } + + return 0; + +err_remove_from_list: + hisi_zip_remove_from_list(hisi_zip); + hisi_zip_debugfs_exit(hisi_zip); + hisi_qm_stop(qm); +err_qm_uninit: + hisi_qm_uninit(qm); + return ret; +} + static int hisi_zip_sriov_configure(struct pci_dev *pdev, int num_vfs) { if (num_vfs == 0) @@ -945,7 +981,7 @@ static struct pci_driver hisi_zip_pci_driver = { .probe = hisi_zip_probe, .remove = hisi_zip_remove, .sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ? - hisi_zip_sriov_configure : 0, + hisi_zip_sriov_configure : NULL, .err_handler = &hisi_zip_err_handler, }; @@ -955,8 +991,6 @@ static void hisi_zip_register_debugfs(void) return; hzip_debugfs_root = debugfs_create_dir("hisi_zip", NULL); - if (IS_ERR_OR_NULL(hzip_debugfs_root)) - hzip_debugfs_root = NULL; } static void hisi_zip_unregister_debugfs(void) |
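The zip_main.c hunks above end with two related changes: SR-IOV can now be enabled directly at probe time through the new vfs_num parameter, and .sriov_configure is set to NULL rather than 0 when CONFIG_PCI_IOV is disabled. A minimal sketch of that wiring follows; all example_* identifiers and the 0xa250 device ID are hypothetical and only illustrate the shape of the code, not the driver's actual implementation.

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa250) },	/* hypothetical device ID */
	{ }
};
MODULE_DEVICE_TABLE(pci, example_dev_ids);

static int example_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	int ret;

	if (!num_vfs) {
		pci_disable_sriov(pdev);
		return 0;
	}

	/* On success, sriov_configure must return the number of VFs enabled. */
	ret = pci_enable_sriov(pdev, num_vfs);
	return ret ? ret : num_vfs;
}

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* A real driver would also set up its queues here and, like the new
	 * hisi_zip_probe(), honour a vfs_num module parameter by calling
	 * example_sriov_configure(pdev, vfs_num) on the PF. */
	return pcim_enable_device(pdev);
}

static void example_remove(struct pci_dev *pdev)
{
	if (pci_num_vf(pdev))
		example_sriov_configure(pdev, 0);
}

static struct pci_driver example_pci_driver = {
	.name		 = "example_acc",
	.id_table	 = example_dev_ids,
	.probe		 = example_probe,
	.remove		 = example_remove,
	/* NULL, not 0, when the kernel is built without PCI IOV support. */
	.sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
			   example_sriov_configure : NULL,
};
module_pci_driver(example_pci_driver);

MODULE_LICENSE("GPL v2");

Wired this way, something like "modprobe hisi_zip vfs_num=3" brings the VFs up at probe without a separate write to sysfs sriov_numvfs, and the new error path in hisi_zip_probe() unwinds the list entry, debugfs and QM if enabling them fails.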