Diffstat (limited to 'drivers/crypto/cavium')
20 files changed, 1822 insertions, 832 deletions
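The nitrox_aead.c addition below registers a gcm(aes) AEAD backed by the FLEXI_CRYPTO_ENCRYPT_HMAC opcode. One detail worth calling out: the driver splits the 12-byte GCM nonce, copying the leading GCM_AES_SALT_SIZE (4) bytes into the flexi crypto context and passing only the trailing 8 bytes with each request (IV_FROM_DPTR). A minimal standalone sketch of that split, using stand-in types rather than the driver's structures:

/*
 * Illustrative only: mirrors the IV handling in nitrox_aes_gcm_enc()/_dec().
 * GCM_AES_IV_SIZE and GCM_AES_SALT_SIZE match the driver; struct fake_fctx
 * is a stand-in for flexi_crypto_context.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define GCM_AES_IV_SIZE   12
#define GCM_AES_SALT_SIZE 4

struct fake_fctx {
    uint8_t iv[GCM_AES_SALT_SIZE];  /* fctx->crypto.iv in the driver */
};

int main(void)
{
    uint8_t req_iv[GCM_AES_IV_SIZE] = {
        0xde, 0xad, 0xbe, 0xef,         /* salt, fixed per key */
        1, 2, 3, 4, 5, 6, 7, 8          /* per-request portion */
    };
    struct fake_fctx fctx;

    /* the salt lives in the device-side context ... */
    memcpy(fctx.iv, req_iv, GCM_AES_SALT_SIZE);

    /* ... and only the remaining bytes ride with the request (DPTR) */
    const uint8_t *dptr_iv = &req_iv[GCM_AES_SALT_SIZE];
    int dptr_ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;

    printf("salt byte 0: 0x%02x, IV bytes sent per request: %d (first 0x%02x)\n",
           fctx.iv[0], dptr_ivsize, dptr_iv[0]);
    return 0;
}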
diff --git a/drivers/crypto/cavium/nitrox/Makefile b/drivers/crypto/cavium/nitrox/Makefile index e12954791673..f83991aaf820 100644 --- a/drivers/crypto/cavium/nitrox/Makefile +++ b/drivers/crypto/cavium/nitrox/Makefile @@ -6,7 +6,10 @@ n5pf-objs := nitrox_main.o \ nitrox_lib.o \ nitrox_hal.o \ nitrox_reqmgr.o \ - nitrox_algs.o + nitrox_algs.o \ + nitrox_mbx.o \ + nitrox_skcipher.o \ + nitrox_aead.o n5pf-$(CONFIG_PCI_IOV) += nitrox_sriov.o n5pf-$(CONFIG_DEBUG_FS) += nitrox_debugfs.o diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c new file mode 100644 index 000000000000..4f43eacd2557 --- /dev/null +++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c @@ -0,0 +1,364 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/kernel.h> +#include <linux/printk.h> +#include <linux/crypto.h> +#include <linux/rtnetlink.h> + +#include <crypto/aead.h> +#include <crypto/authenc.h> +#include <crypto/des.h> +#include <crypto/sha.h> +#include <crypto/internal/aead.h> +#include <crypto/scatterwalk.h> +#include <crypto/gcm.h> + +#include "nitrox_dev.h" +#include "nitrox_common.h" +#include "nitrox_req.h" + +#define GCM_AES_SALT_SIZE 4 + +/** + * struct nitrox_crypt_params - Params to set nitrox crypto request. + * @cryptlen: Encryption/Decryption data length + * @authlen: Assoc data length + Cryptlen + * @srclen: Input buffer length + * @dstlen: Output buffer length + * @iv: IV data + * @ivsize: IV data length + * @ctrl_arg: Identifies the request type (ENCRYPT/DECRYPT) + */ +struct nitrox_crypt_params { + unsigned int cryptlen; + unsigned int authlen; + unsigned int srclen; + unsigned int dstlen; + u8 *iv; + int ivsize; + u8 ctrl_arg; +}; + +union gph_p3 { + struct { +#ifdef __BIG_ENDIAN_BITFIELD + u16 iv_offset : 8; + u16 auth_offset : 8; +#else + u16 auth_offset : 8; + u16 iv_offset : 8; +#endif + }; + u16 param; +}; + +static int nitrox_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + int aes_keylen; + struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); + struct flexi_crypto_context *fctx; + union fc_ctx_flags flags; + + aes_keylen = flexi_aes_keylen(keylen); + if (aes_keylen < 0) { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + /* fill crypto context */ + fctx = nctx->u.fctx; + flags.f = be64_to_cpu(fctx->flags.f); + flags.w0.aes_keylen = aes_keylen; + fctx->flags.f = cpu_to_be64(flags.f); + + /* copy enc key to context */ + memset(&fctx->crypto, 0, sizeof(fctx->crypto)); + memcpy(fctx->crypto.u.key, key, keylen); + + return 0; +} + +static int nitrox_aead_setauthsize(struct crypto_aead *aead, + unsigned int authsize) +{ + struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); + struct flexi_crypto_context *fctx = nctx->u.fctx; + union fc_ctx_flags flags; + + flags.f = be64_to_cpu(fctx->flags.f); + flags.w0.mac_len = authsize; + fctx->flags.f = cpu_to_be64(flags.f); + + aead->authsize = authsize; + + return 0; +} + +static int alloc_src_sglist(struct aead_request *areq, char *iv, int ivsize, + int buflen) +{ + struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq); + int nents = sg_nents_for_len(areq->src, buflen) + 1; + int ret; + + if (nents < 0) + return nents; + + /* Allocate buffer to hold IV and input scatterlist array */ + ret = alloc_src_req_buf(nkreq, nents, ivsize); + if (ret) + return ret; + + nitrox_creq_copy_iv(nkreq->src, iv, ivsize); + nitrox_creq_set_src_sg(nkreq, nents, ivsize, areq->src, buflen); + + return 0; +} + +static int alloc_dst_sglist(struct 
aead_request *areq, int ivsize, int buflen) +{ + struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq); + int nents = sg_nents_for_len(areq->dst, buflen) + 3; + int ret; + + if (nents < 0) + return nents; + + /* Allocate buffer to hold ORH, COMPLETION and output scatterlist + * array + */ + ret = alloc_dst_req_buf(nkreq, nents); + if (ret) + return ret; + + nitrox_creq_set_orh(nkreq); + nitrox_creq_set_comp(nkreq); + nitrox_creq_set_dst_sg(nkreq, nents, ivsize, areq->dst, buflen); + + return 0; +} + +static void free_src_sglist(struct aead_request *areq) +{ + struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq); + + kfree(nkreq->src); +} + +static void free_dst_sglist(struct aead_request *areq) +{ + struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq); + + kfree(nkreq->dst); +} + +static int nitrox_set_creq(struct aead_request *areq, + struct nitrox_crypt_params *params) +{ + struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq); + struct se_crypto_request *creq = &nkreq->creq; + struct crypto_aead *aead = crypto_aead_reqtfm(areq); + union gph_p3 param3; + struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); + int ret; + + creq->flags = areq->base.flags; + creq->gfp = (areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC; + + creq->ctrl.value = 0; + creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC; + creq->ctrl.s.arg = params->ctrl_arg; + + creq->gph.param0 = cpu_to_be16(params->cryptlen); + creq->gph.param1 = cpu_to_be16(params->authlen); + creq->gph.param2 = cpu_to_be16(params->ivsize + areq->assoclen); + param3.iv_offset = 0; + param3.auth_offset = params->ivsize; + creq->gph.param3 = cpu_to_be16(param3.param); + + creq->ctx_handle = nctx->u.ctx_handle; + creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context); + + ret = alloc_src_sglist(areq, params->iv, params->ivsize, + params->srclen); + if (ret) + return ret; + + ret = alloc_dst_sglist(areq, params->ivsize, params->dstlen); + if (ret) { + free_src_sglist(areq); + return ret; + } + + return 0; +} + +static void nitrox_aead_callback(void *arg, int err) +{ + struct aead_request *areq = arg; + + free_src_sglist(areq); + free_dst_sglist(areq); + if (err) { + pr_err_ratelimited("request failed status 0x%0x\n", err); + err = -EINVAL; + } + + areq->base.complete(&areq->base, err); +} + +static int nitrox_aes_gcm_enc(struct aead_request *areq) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(areq); + struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); + struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq); + struct se_crypto_request *creq = &nkreq->creq; + struct flexi_crypto_context *fctx = nctx->u.fctx; + struct nitrox_crypt_params params; + int ret; + + memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE); + + memset(&params, 0, sizeof(params)); + params.cryptlen = areq->cryptlen; + params.authlen = areq->assoclen + params.cryptlen; + params.srclen = params.authlen; + params.dstlen = params.srclen + aead->authsize; + params.iv = &areq->iv[GCM_AES_SALT_SIZE]; + params.ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE; + params.ctrl_arg = ENCRYPT; + ret = nitrox_set_creq(areq, &params); + if (ret) + return ret; + + /* send the crypto request */ + return nitrox_process_se_request(nctx->ndev, creq, nitrox_aead_callback, + areq); +} + +static int nitrox_aes_gcm_dec(struct aead_request *areq) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(areq); + struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); + struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq); + struct se_crypto_request 
*creq = &nkreq->creq; + struct flexi_crypto_context *fctx = nctx->u.fctx; + struct nitrox_crypt_params params; + int ret; + + memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE); + + memset(&params, 0, sizeof(params)); + params.cryptlen = areq->cryptlen - aead->authsize; + params.authlen = areq->assoclen + params.cryptlen; + params.srclen = areq->cryptlen + areq->assoclen; + params.dstlen = params.srclen - aead->authsize; + params.iv = &areq->iv[GCM_AES_SALT_SIZE]; + params.ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE; + params.ctrl_arg = DECRYPT; + ret = nitrox_set_creq(areq, &params); + if (ret) + return ret; + + /* send the crypto request */ + return nitrox_process_se_request(nctx->ndev, creq, nitrox_aead_callback, + areq); +} + +static int nitrox_aead_init(struct crypto_aead *aead) +{ + struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); + struct crypto_ctx_hdr *chdr; + + /* get the first device */ + nctx->ndev = nitrox_get_first_device(); + if (!nctx->ndev) + return -ENODEV; + + /* allocate nitrox crypto context */ + chdr = crypto_alloc_context(nctx->ndev); + if (!chdr) { + nitrox_put_device(nctx->ndev); + return -ENOMEM; + } + nctx->chdr = chdr; + nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr + + sizeof(struct ctx_hdr)); + nctx->u.fctx->flags.f = 0; + + return 0; +} + +static int nitrox_aes_gcm_init(struct crypto_aead *aead) +{ + int ret; + struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); + union fc_ctx_flags *flags; + + ret = nitrox_aead_init(aead); + if (ret) + return ret; + + flags = &nctx->u.fctx->flags; + flags->w0.cipher_type = CIPHER_AES_GCM; + flags->w0.hash_type = AUTH_NULL; + flags->w0.iv_source = IV_FROM_DPTR; + /* ask microcode to calculate ipad/opad */ + flags->w0.auth_input_type = 1; + flags->f = be64_to_cpu(flags->f); + + crypto_aead_set_reqsize(aead, sizeof(struct aead_request) + + sizeof(struct nitrox_kcrypt_request)); + + return 0; +} + +static void nitrox_aead_exit(struct crypto_aead *aead) +{ + struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); + + /* free the nitrox crypto context */ + if (nctx->u.ctx_handle) { + struct flexi_crypto_context *fctx = nctx->u.fctx; + + memzero_explicit(&fctx->crypto, sizeof(struct crypto_keys)); + memzero_explicit(&fctx->auth, sizeof(struct auth_keys)); + crypto_free_context((void *)nctx->chdr); + } + nitrox_put_device(nctx->ndev); + + nctx->u.ctx_handle = 0; + nctx->ndev = NULL; +} + +static struct aead_alg nitrox_aeads[] = { { + .base = { + .cra_name = "gcm(aes)", + .cra_driver_name = "n5_aes_gcm", + .cra_priority = PRIO, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + .setkey = nitrox_aes_gcm_setkey, + .setauthsize = nitrox_aead_setauthsize, + .encrypt = nitrox_aes_gcm_enc, + .decrypt = nitrox_aes_gcm_dec, + .init = nitrox_aes_gcm_init, + .exit = nitrox_aead_exit, + .ivsize = GCM_AES_IV_SIZE, + .maxauthsize = AES_BLOCK_SIZE, +} }; + +int nitrox_register_aeads(void) +{ + return crypto_register_aeads(nitrox_aeads, ARRAY_SIZE(nitrox_aeads)); +} + +void nitrox_unregister_aeads(void) +{ + crypto_unregister_aeads(nitrox_aeads, ARRAY_SIZE(nitrox_aeads)); +} diff --git a/drivers/crypto/cavium/nitrox/nitrox_algs.c b/drivers/crypto/cavium/nitrox/nitrox_algs.c index 2ae6124e5da6..d646ae5f29b0 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_algs.c +++ b/drivers/crypto/cavium/nitrox/nitrox_algs.c @@ -1,458 +1,24 @@ -// SPDX-License-Identifier: GPL-2.0 -#include <linux/crypto.h> -#include 
<linux/kernel.h> -#include <linux/module.h> -#include <linux/printk.h> - -#include <crypto/aes.h> -#include <crypto/skcipher.h> -#include <crypto/ctr.h> -#include <crypto/des.h> -#include <crypto/xts.h> - -#include "nitrox_dev.h" #include "nitrox_common.h" -#include "nitrox_req.h" - -#define PRIO 4001 - -struct nitrox_cipher { - const char *name; - enum flexi_cipher value; -}; - -/** - * supported cipher list - */ -static const struct nitrox_cipher flexi_cipher_table[] = { - { "null", CIPHER_NULL }, - { "cbc(des3_ede)", CIPHER_3DES_CBC }, - { "ecb(des3_ede)", CIPHER_3DES_ECB }, - { "cbc(aes)", CIPHER_AES_CBC }, - { "ecb(aes)", CIPHER_AES_ECB }, - { "cfb(aes)", CIPHER_AES_CFB }, - { "rfc3686(ctr(aes))", CIPHER_AES_CTR }, - { "xts(aes)", CIPHER_AES_XTS }, - { "cts(cbc(aes))", CIPHER_AES_CBC_CTS }, - { NULL, CIPHER_INVALID } -}; - -static enum flexi_cipher flexi_cipher_type(const char *name) -{ - const struct nitrox_cipher *cipher = flexi_cipher_table; - - while (cipher->name) { - if (!strcmp(cipher->name, name)) - break; - cipher++; - } - return cipher->value; -} - -static int flexi_aes_keylen(int keylen) -{ - int aes_keylen; - - switch (keylen) { - case AES_KEYSIZE_128: - aes_keylen = 1; - break; - case AES_KEYSIZE_192: - aes_keylen = 2; - break; - case AES_KEYSIZE_256: - aes_keylen = 3; - break; - default: - aes_keylen = -EINVAL; - break; - } - return aes_keylen; -} - -static int nitrox_skcipher_init(struct crypto_skcipher *tfm) -{ - struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm); - void *fctx; - - /* get the first device */ - nctx->ndev = nitrox_get_first_device(); - if (!nctx->ndev) - return -ENODEV; - - /* allocate nitrox crypto context */ - fctx = crypto_alloc_context(nctx->ndev); - if (!fctx) { - nitrox_put_device(nctx->ndev); - return -ENOMEM; - } - nctx->u.ctx_handle = (uintptr_t)fctx; - crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(tfm) + - sizeof(struct nitrox_kcrypt_request)); - return 0; -} - -static void nitrox_skcipher_exit(struct crypto_skcipher *tfm) -{ - struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm); - - /* free the nitrox crypto context */ - if (nctx->u.ctx_handle) { - struct flexi_crypto_context *fctx = nctx->u.fctx; - - memset(&fctx->crypto, 0, sizeof(struct crypto_keys)); - memset(&fctx->auth, 0, sizeof(struct auth_keys)); - crypto_free_context((void *)fctx); - } - nitrox_put_device(nctx->ndev); - - nctx->u.ctx_handle = 0; - nctx->ndev = NULL; -} -static inline int nitrox_skcipher_setkey(struct crypto_skcipher *cipher, - int aes_keylen, const u8 *key, - unsigned int keylen) -{ - struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); - struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm); - struct flexi_crypto_context *fctx; - enum flexi_cipher cipher_type; - const char *name; - - name = crypto_tfm_alg_name(tfm); - cipher_type = flexi_cipher_type(name); - if (unlikely(cipher_type == CIPHER_INVALID)) { - pr_err("unsupported cipher: %s\n", name); - return -EINVAL; - } - - /* fill crypto context */ - fctx = nctx->u.fctx; - fctx->flags = 0; - fctx->w0.cipher_type = cipher_type; - fctx->w0.aes_keylen = aes_keylen; - fctx->w0.iv_source = IV_FROM_DPTR; - fctx->flags = cpu_to_be64(*(u64 *)&fctx->w0); - /* copy the key to context */ - memcpy(fctx->crypto.u.key, key, keylen); - - return 0; -} - -static int nitrox_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, - unsigned int keylen) +int nitrox_crypto_register(void) { - int aes_keylen; + int err; - aes_keylen = flexi_aes_keylen(keylen); - if (aes_keylen < 0) { - 
crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); -} + err = nitrox_register_skciphers(); + if (err) + return err; -static void nitrox_skcipher_callback(struct skcipher_request *skreq, - int err) -{ + err = nitrox_register_aeads(); if (err) { - pr_err_ratelimited("request failed status 0x%0x\n", err); - err = -EINVAL; - } - skcipher_request_complete(skreq, err); -} - -static int nitrox_skcipher_crypt(struct skcipher_request *skreq, bool enc) -{ - struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(skreq); - struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(cipher); - struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); - int ivsize = crypto_skcipher_ivsize(cipher); - struct se_crypto_request *creq; - - creq = &nkreq->creq; - creq->flags = skreq->base.flags; - creq->gfp = (skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? - GFP_KERNEL : GFP_ATOMIC; - - /* fill the request */ - creq->ctrl.value = 0; - creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC; - creq->ctrl.s.arg = (enc ? ENCRYPT : DECRYPT); - /* param0: length of the data to be encrypted */ - creq->gph.param0 = cpu_to_be16(skreq->cryptlen); - creq->gph.param1 = 0; - /* param2: encryption data offset */ - creq->gph.param2 = cpu_to_be16(ivsize); - creq->gph.param3 = 0; - - creq->ctx_handle = nctx->u.ctx_handle; - creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context); - - /* copy the iv */ - memcpy(creq->iv, skreq->iv, ivsize); - creq->ivsize = ivsize; - creq->src = skreq->src; - creq->dst = skreq->dst; - - nkreq->nctx = nctx; - nkreq->skreq = skreq; - - /* send the crypto request */ - return nitrox_process_se_request(nctx->ndev, creq, - nitrox_skcipher_callback, skreq); -} - -static int nitrox_aes_encrypt(struct skcipher_request *skreq) -{ - return nitrox_skcipher_crypt(skreq, true); -} - -static int nitrox_aes_decrypt(struct skcipher_request *skreq) -{ - return nitrox_skcipher_crypt(skreq, false); -} - -static int nitrox_3des_setkey(struct crypto_skcipher *cipher, - const u8 *key, unsigned int keylen) -{ - if (keylen != DES3_EDE_KEY_SIZE) { - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - - return nitrox_skcipher_setkey(cipher, 0, key, keylen); -} - -static int nitrox_3des_encrypt(struct skcipher_request *skreq) -{ - return nitrox_skcipher_crypt(skreq, true); -} - -static int nitrox_3des_decrypt(struct skcipher_request *skreq) -{ - return nitrox_skcipher_crypt(skreq, false); -} - -static int nitrox_aes_xts_setkey(struct crypto_skcipher *cipher, - const u8 *key, unsigned int keylen) -{ - struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); - struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm); - struct flexi_crypto_context *fctx; - int aes_keylen, ret; - - ret = xts_check_key(tfm, key, keylen); - if (ret) - return ret; - - keylen /= 2; - - aes_keylen = flexi_aes_keylen(keylen); - if (aes_keylen < 0) { - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; + nitrox_unregister_skciphers(); + return err; } - fctx = nctx->u.fctx; - /* copy KEY2 */ - memcpy(fctx->auth.u.key2, (key + keylen), keylen); - - return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); -} - -static int nitrox_aes_ctr_rfc3686_setkey(struct crypto_skcipher *cipher, - const u8 *key, unsigned int keylen) -{ - struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); - struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm); - struct flexi_crypto_context *fctx; - int aes_keylen; - - if 
(keylen < CTR_RFC3686_NONCE_SIZE) - return -EINVAL; - - fctx = nctx->u.fctx; - - memcpy(fctx->crypto.iv, key + (keylen - CTR_RFC3686_NONCE_SIZE), - CTR_RFC3686_NONCE_SIZE); - - keylen -= CTR_RFC3686_NONCE_SIZE; - - aes_keylen = flexi_aes_keylen(keylen); - if (aes_keylen < 0) { - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); -} - -static struct skcipher_alg nitrox_skciphers[] = { { - .base = { - .cra_name = "cbc(aes)", - .cra_driver_name = "n5_cbc(aes)", - .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - }, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = nitrox_aes_setkey, - .encrypt = nitrox_aes_encrypt, - .decrypt = nitrox_aes_decrypt, - .init = nitrox_skcipher_init, - .exit = nitrox_skcipher_exit, -}, { - .base = { - .cra_name = "ecb(aes)", - .cra_driver_name = "n5_ecb(aes)", - .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - }, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = nitrox_aes_setkey, - .encrypt = nitrox_aes_encrypt, - .decrypt = nitrox_aes_decrypt, - .init = nitrox_skcipher_init, - .exit = nitrox_skcipher_exit, -}, { - .base = { - .cra_name = "cfb(aes)", - .cra_driver_name = "n5_cfb(aes)", - .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - }, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = nitrox_aes_setkey, - .encrypt = nitrox_aes_encrypt, - .decrypt = nitrox_aes_decrypt, - .init = nitrox_skcipher_init, - .exit = nitrox_skcipher_exit, -}, { - .base = { - .cra_name = "xts(aes)", - .cra_driver_name = "n5_xts(aes)", - .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - }, - .min_keysize = 2 * AES_MIN_KEY_SIZE, - .max_keysize = 2 * AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = nitrox_aes_xts_setkey, - .encrypt = nitrox_aes_encrypt, - .decrypt = nitrox_aes_decrypt, - .init = nitrox_skcipher_init, - .exit = nitrox_skcipher_exit, -}, { - .base = { - .cra_name = "rfc3686(ctr(aes))", - .cra_driver_name = "n5_rfc3686(ctr(aes))", - .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - }, - .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, - .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, - .ivsize = CTR_RFC3686_IV_SIZE, - .init = nitrox_skcipher_init, - .exit = nitrox_skcipher_exit, - .setkey = nitrox_aes_ctr_rfc3686_setkey, - .encrypt = nitrox_aes_encrypt, - .decrypt = nitrox_aes_decrypt, -}, { - .base = { - .cra_name = "cts(cbc(aes))", - .cra_driver_name = "n5_cts(cbc(aes))", - .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), - .cra_alignmask = 0, - .cra_type = &crypto_ablkcipher_type, - 
.cra_module = THIS_MODULE, - }, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = nitrox_aes_setkey, - .encrypt = nitrox_aes_encrypt, - .decrypt = nitrox_aes_decrypt, - .init = nitrox_skcipher_init, - .exit = nitrox_skcipher_exit, -}, { - .base = { - .cra_name = "cbc(des3_ede)", - .cra_driver_name = "n5_cbc(des3_ede)", - .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - }, - .min_keysize = DES3_EDE_KEY_SIZE, - .max_keysize = DES3_EDE_KEY_SIZE, - .ivsize = DES3_EDE_BLOCK_SIZE, - .setkey = nitrox_3des_setkey, - .encrypt = nitrox_3des_encrypt, - .decrypt = nitrox_3des_decrypt, - .init = nitrox_skcipher_init, - .exit = nitrox_skcipher_exit, -}, { - .base = { - .cra_name = "ecb(des3_ede)", - .cra_driver_name = "n5_ecb(des3_ede)", - .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - }, - .min_keysize = DES3_EDE_KEY_SIZE, - .max_keysize = DES3_EDE_KEY_SIZE, - .ivsize = DES3_EDE_BLOCK_SIZE, - .setkey = nitrox_3des_setkey, - .encrypt = nitrox_3des_encrypt, - .decrypt = nitrox_3des_decrypt, - .init = nitrox_skcipher_init, - .exit = nitrox_skcipher_exit, -} - -}; - -int nitrox_crypto_register(void) -{ - return crypto_register_skciphers(nitrox_skciphers, - ARRAY_SIZE(nitrox_skciphers)); + return 0; } void nitrox_crypto_unregister(void) { - crypto_unregister_skciphers(nitrox_skciphers, - ARRAY_SIZE(nitrox_skciphers)); + nitrox_unregister_aeads(); + nitrox_unregister_skciphers(); } diff --git a/drivers/crypto/cavium/nitrox/nitrox_common.h b/drivers/crypto/cavium/nitrox/nitrox_common.h index 863143a8336b..e4be69d7e6e5 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_common.h +++ b/drivers/crypto/cavium/nitrox/nitrox_common.h @@ -7,6 +7,10 @@ int nitrox_crypto_register(void); void nitrox_crypto_unregister(void); +int nitrox_register_aeads(void); +void nitrox_unregister_aeads(void); +int nitrox_register_skciphers(void); +void nitrox_unregister_skciphers(void); void *crypto_alloc_context(struct nitrox_device *ndev); void crypto_free_context(void *ctx); struct nitrox_device *nitrox_get_first_device(void); @@ -19,7 +23,7 @@ void pkt_slc_resp_tasklet(unsigned long data); int nitrox_process_se_request(struct nitrox_device *ndev, struct se_crypto_request *req, completion_t cb, - struct skcipher_request *skreq); + void *cb_arg); void backlog_qflush_work(struct work_struct *work); diff --git a/drivers/crypto/cavium/nitrox/nitrox_csr.h b/drivers/crypto/cavium/nitrox/nitrox_csr.h index 1ad27b1a87c5..a2a452642b38 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_csr.h +++ b/drivers/crypto/cavium/nitrox/nitrox_csr.h @@ -54,7 +54,13 @@ #define NPS_STATS_PKT_DMA_WR_CNT 0x1000190 /* NPS packet registers */ -#define NPS_PKT_INT 0x1040018 +#define NPS_PKT_INT 0x1040018 +#define NPS_PKT_MBOX_INT_LO 0x1040020 +#define NPS_PKT_MBOX_INT_LO_ENA_W1C 0x1040030 +#define NPS_PKT_MBOX_INT_LO_ENA_W1S 0x1040038 +#define NPS_PKT_MBOX_INT_HI 0x1040040 +#define NPS_PKT_MBOX_INT_HI_ENA_W1C 0x1040050 +#define NPS_PKT_MBOX_INT_HI_ENA_W1S 0x1040058 #define NPS_PKT_IN_RERR_HI 0x1040108 #define NPS_PKT_IN_RERR_HI_ENA_W1S 0x1040120 #define NPS_PKT_IN_RERR_LO 0x1040128 @@ -74,6 +80,10 @@ #define NPS_PKT_SLC_RERR_LO_ENA_W1S 0x1040240 #define NPS_PKT_SLC_ERR_TYPE 0x1040248 #define 
NPS_PKT_SLC_ERR_TYPE_ENA_W1S 0x1040260 +/* Mailbox PF->VF PF Accessible Data registers */ +#define NPS_PKT_MBOX_PF_VF_PFDATAX(_i) (0x1040800 + ((_i) * 0x8)) +#define NPS_PKT_MBOX_VF_PF_PFDATAX(_i) (0x1040C00 + ((_i) * 0x8)) + #define NPS_PKT_SLC_CTLX(_i) (0x10000 + ((_i) * 0x40000)) #define NPS_PKT_SLC_CNTSX(_i) (0x10008 + ((_i) * 0x40000)) #define NPS_PKT_SLC_INT_LEVELSX(_i) (0x10010 + ((_i) * 0x40000)) diff --git a/drivers/crypto/cavium/nitrox/nitrox_debugfs.c b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c index 5f3cd5fafe04..0196b992280f 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_debugfs.c +++ b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c @@ -13,18 +13,7 @@ static int firmware_show(struct seq_file *s, void *v) return 0; } -static int firmware_open(struct inode *inode, struct file *file) -{ - return single_open(file, firmware_show, inode->i_private); -} - -static const struct file_operations firmware_fops = { - .owner = THIS_MODULE, - .open = firmware_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(firmware); static int device_show(struct seq_file *s, void *v) { @@ -41,18 +30,7 @@ static int device_show(struct seq_file *s, void *v) return 0; } -static int nitrox_open(struct inode *inode, struct file *file) -{ - return single_open(file, device_show, inode->i_private); -} - -static const struct file_operations nitrox_fops = { - .owner = THIS_MODULE, - .open = nitrox_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(device); static int stats_show(struct seq_file *s, void *v) { @@ -69,18 +47,7 @@ static int stats_show(struct seq_file *s, void *v) return 0; } -static int nitrox_stats_open(struct inode *inode, struct file *file) -{ - return single_open(file, stats_show, inode->i_private); -} - -static const struct file_operations nitrox_stats_fops = { - .owner = THIS_MODULE, - .open = nitrox_stats_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(stats); void nitrox_debugfs_exit(struct nitrox_device *ndev) { @@ -97,13 +64,16 @@ int nitrox_debugfs_init(struct nitrox_device *ndev) return -ENOMEM; ndev->debugfs_dir = dir; - f = debugfs_create_file("firmware", 0400, dir, ndev, &firmware_fops); + f = debugfs_create_file("firmware", 0400, dir, ndev, + &firmware_fops); if (!f) goto err; - f = debugfs_create_file("device", 0400, dir, ndev, &nitrox_fops); + f = debugfs_create_file("device", 0400, dir, ndev, + &device_fops); if (!f) goto err; - f = debugfs_create_file("stats", 0400, dir, ndev, &nitrox_stats_fops); + f = debugfs_create_file("stats", 0400, dir, ndev, + &stats_fops); if (!f) goto err; diff --git a/drivers/crypto/cavium/nitrox/nitrox_debugfs.h b/drivers/crypto/cavium/nitrox/nitrox_debugfs.h new file mode 100644 index 000000000000..a8d85ffa619c --- /dev/null +++ b/drivers/crypto/cavium/nitrox/nitrox_debugfs.h @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +#ifndef __NITROX_DEBUGFS_H +#define __NITROX_DEBUGFS_H + +#include "nitrox_dev.h" + +#ifdef CONFIG_DEBUG_FS +int nitrox_debugfs_init(struct nitrox_device *ndev); +void nitrox_debugfs_exit(struct nitrox_device *ndev); +#else +static inline int nitrox_debugfs_init(struct nitrox_device *ndev) +{ + return 0; +} + +static inline void nitrox_debugfs_exit(struct nitrox_device *ndev) +{ +} +#endif /* !CONFIG_DEBUG_FS */ + +#endif /* __NITROX_DEBUGFS_H */ diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h index 
283e252385fb..0338877b828f 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_dev.h +++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h @@ -8,6 +8,8 @@ #include <linux/if.h> #define VERSION_LEN 32 +/* Maximum queues in PF mode */ +#define MAX_PF_QUEUES 64 /** * struct nitrox_cmdq - NITROX command queue @@ -103,6 +105,61 @@ struct nitrox_q_vector { }; }; +/** + * mbox_msg - Mailbox message data + * @type: message type + * @opcode: message opcode + * @data: message data + */ +union mbox_msg { + u64 value; + struct { + u64 type: 2; + u64 opcode: 6; + u64 data: 58; + }; + struct { + u64 type: 2; + u64 opcode: 6; + u64 chipid: 8; + u64 vfid: 8; + } id; +}; + +/** + * nitrox_vfdev - NITROX VF device instance in PF + * @state: VF device state + * @vfno: VF number + * @nr_queues: number of queues enabled in VF + * @ring: ring to communicate with VF + * @msg: Mailbox message data from VF + * @mbx_resp: Mailbox counters + */ +struct nitrox_vfdev { + atomic_t state; + int vfno; + int nr_queues; + int ring; + union mbox_msg msg; + atomic64_t mbx_resp; +}; + +/** + * struct nitrox_iov - SR-IOV information + * @num_vfs: number of VF(s) enabled + * @max_vf_queues: Maximum number of queues allowed for VF + * @vfdev: VF(s) devices + * @pf2vf_wq: workqueue for PF2VF communication + * @msix: MSI-X entry for PF in SR-IOV case + */ +struct nitrox_iov { + int num_vfs; + int max_vf_queues; + struct nitrox_vfdev *vfdev; + struct workqueue_struct *pf2vf_wq; + struct msix_entry msix; +}; + /* * NITROX Device states */ @@ -150,6 +207,9 @@ enum vf_mode { * @ctx_pool: DMA pool for crypto context * @pkt_inq: Packet input rings * @qvec: MSI-X queue vectors information + * @iov: SR-IOV information + * @num_vecs: number of MSI-X vectors + * @stats: request statistics * @hw: hardware information * @debugfs_dir: debugfs directory */ @@ -168,13 +228,13 @@ struct nitrox_device { int node; u16 qlen; u16 nr_queues; - int num_vfs; enum vf_mode mode; struct dma_pool *ctx_pool; struct nitrox_cmdq *pkt_inq; struct nitrox_q_vector *qvec; + struct nitrox_iov iov; int num_vecs; struct nitrox_stats stats; @@ -213,17 +273,9 @@ static inline bool nitrox_ready(struct nitrox_device *ndev) return atomic_read(&ndev->state) == __NDEV_READY; } -#ifdef CONFIG_DEBUG_FS -int nitrox_debugfs_init(struct nitrox_device *ndev); -void nitrox_debugfs_exit(struct nitrox_device *ndev); -#else -static inline int nitrox_debugfs_init(struct nitrox_device *ndev) +static inline bool nitrox_vfdev_ready(struct nitrox_vfdev *vfdev) { - return 0; + return atomic_read(&vfdev->state) == __NDEV_READY; } -static inline void nitrox_debugfs_exit(struct nitrox_device *ndev) -{ } -#endif - #endif /* __NITROX_DEV_H */ diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.c b/drivers/crypto/cavium/nitrox/nitrox_hal.c index a9b82387cf53..c08d9f33a3b1 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_hal.c +++ b/drivers/crypto/cavium/nitrox/nitrox_hal.c @@ -5,10 +5,11 @@ #include "nitrox_csr.h" #define PLL_REF_CLK 50 +#define MAX_CSR_RETRIES 10 /** * emu_enable_cores - Enable EMU cluster cores. - * @ndev: N5 device + * @ndev: NITROX device */ static void emu_enable_cores(struct nitrox_device *ndev) { @@ -33,7 +34,7 @@ static void emu_enable_cores(struct nitrox_device *ndev) /** * nitrox_config_emu_unit - configure EMU unit.
- * @ndev: N5 device + * @ndev: NITROX device */ void nitrox_config_emu_unit(struct nitrox_device *ndev) { @@ -63,29 +64,26 @@ void nitrox_config_emu_unit(struct nitrox_device *ndev) static void reset_pkt_input_ring(struct nitrox_device *ndev, int ring) { union nps_pkt_in_instr_ctl pkt_in_ctl; - union nps_pkt_in_instr_baoff_dbell pkt_in_dbell; union nps_pkt_in_done_cnts pkt_in_cnts; + int max_retries = MAX_CSR_RETRIES; u64 offset; + /* step 1: disable the ring, clear enable bit */ offset = NPS_PKT_IN_INSTR_CTLX(ring); - /* disable the ring */ pkt_in_ctl.value = nitrox_read_csr(ndev, offset); pkt_in_ctl.s.enb = 0; nitrox_write_csr(ndev, offset, pkt_in_ctl.value); - usleep_range(100, 150); - /* wait to clear [ENB] */ + /* step 2: wait to clear [ENB] */ + usleep_range(100, 150); do { pkt_in_ctl.value = nitrox_read_csr(ndev, offset); - } while (pkt_in_ctl.s.enb); - - /* clear off door bell counts */ - offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(ring); - pkt_in_dbell.value = 0; - pkt_in_dbell.s.dbell = 0xffffffff; - nitrox_write_csr(ndev, offset, pkt_in_dbell.value); + if (!pkt_in_ctl.s.enb) + break; + udelay(50); + } while (max_retries--); - /* clear done counts */ + /* step 3: clear done counts */ offset = NPS_PKT_IN_DONE_CNTSX(ring); pkt_in_cnts.value = nitrox_read_csr(ndev, offset); nitrox_write_csr(ndev, offset, pkt_in_cnts.value); @@ -95,6 +93,7 @@ static void reset_pkt_input_ring(struct nitrox_device *ndev, int ring) void enable_pkt_input_ring(struct nitrox_device *ndev, int ring) { union nps_pkt_in_instr_ctl pkt_in_ctl; + int max_retries = MAX_CSR_RETRIES; u64 offset; /* 64-byte instruction size */ @@ -107,12 +106,15 @@ void enable_pkt_input_ring(struct nitrox_device *ndev, int ring) /* wait for set [ENB] */ do { pkt_in_ctl.value = nitrox_read_csr(ndev, offset); - } while (!pkt_in_ctl.s.enb); + if (pkt_in_ctl.s.enb) + break; + udelay(50); + } while (max_retries--); } /** * nitrox_config_pkt_input_rings - configure Packet Input Rings - * @ndev: N5 device + * @ndev: NITROX device */ void nitrox_config_pkt_input_rings(struct nitrox_device *ndev) { @@ -121,11 +123,14 @@ void nitrox_config_pkt_input_rings(struct nitrox_device *ndev) for (i = 0; i < ndev->nr_queues; i++) { struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i]; union nps_pkt_in_instr_rsize pkt_in_rsize; + union nps_pkt_in_instr_baoff_dbell pkt_in_dbell; u64 offset; reset_pkt_input_ring(ndev, i); - /* configure ring base address 16-byte aligned, + /** + * step 4: + * configure ring base address 16-byte aligned, * size and interrupt threshold. 
*/ offset = NPS_PKT_IN_INSTR_BADDRX(i); @@ -141,6 +146,13 @@ void nitrox_config_pkt_input_rings(struct nitrox_device *ndev) offset = NPS_PKT_IN_INT_LEVELSX(i); nitrox_write_csr(ndev, offset, 0xffffffff); + /* step 5: clear off door bell counts */ + offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i); + pkt_in_dbell.value = 0; + pkt_in_dbell.s.dbell = 0xffffffff; + nitrox_write_csr(ndev, offset, pkt_in_dbell.value); + + /* enable the ring */ enable_pkt_input_ring(ndev, i); } } @@ -149,21 +161,26 @@ static void reset_pkt_solicit_port(struct nitrox_device *ndev, int port) { union nps_pkt_slc_ctl pkt_slc_ctl; union nps_pkt_slc_cnts pkt_slc_cnts; + int max_retries = MAX_CSR_RETRIES; u64 offset; - /* disable slc port */ + /* step 1: disable slc port */ offset = NPS_PKT_SLC_CTLX(port); pkt_slc_ctl.value = nitrox_read_csr(ndev, offset); pkt_slc_ctl.s.enb = 0; nitrox_write_csr(ndev, offset, pkt_slc_ctl.value); - usleep_range(100, 150); + /* step 2 */ + usleep_range(100, 150); /* wait to clear [ENB] */ do { pkt_slc_ctl.value = nitrox_read_csr(ndev, offset); - } while (pkt_slc_ctl.s.enb); + if (!pkt_slc_ctl.s.enb) + break; + udelay(50); + } while (max_retries--); - /* clear slc counters */ + /* step 3: clear slc counters */ offset = NPS_PKT_SLC_CNTSX(port); pkt_slc_cnts.value = nitrox_read_csr(ndev, offset); nitrox_write_csr(ndev, offset, pkt_slc_cnts.value); @@ -173,12 +190,12 @@ static void reset_pkt_solicit_port(struct nitrox_device *ndev, int port) void enable_pkt_solicit_port(struct nitrox_device *ndev, int port) { union nps_pkt_slc_ctl pkt_slc_ctl; + int max_retries = MAX_CSR_RETRIES; u64 offset; offset = NPS_PKT_SLC_CTLX(port); pkt_slc_ctl.value = 0; pkt_slc_ctl.s.enb = 1; - /* * 8 trailing 0x00 bytes will be added * to the end of the outgoing packet. @@ -191,23 +208,27 @@ void enable_pkt_solicit_port(struct nitrox_device *ndev, int port) /* wait to set [ENB] */ do { pkt_slc_ctl.value = nitrox_read_csr(ndev, offset); - } while (!pkt_slc_ctl.s.enb); + if (pkt_slc_ctl.s.enb) + break; + udelay(50); + } while (max_retries--); } -static void config_single_pkt_solicit_port(struct nitrox_device *ndev, - int port) +static void config_pkt_solicit_port(struct nitrox_device *ndev, int port) { union nps_pkt_slc_int_levels pkt_slc_int; u64 offset; reset_pkt_solicit_port(ndev, port); + /* step 4: configure interrupt levels */ offset = NPS_PKT_SLC_INT_LEVELSX(port); pkt_slc_int.value = 0; /* time interrupt threshold */ pkt_slc_int.s.timet = 0x3fffff; nitrox_write_csr(ndev, offset, pkt_slc_int.value); + /* enable the solicit port */ enable_pkt_solicit_port(ndev, port); } @@ -216,12 +237,12 @@ void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev) int i; for (i = 0; i < ndev->nr_queues; i++) - config_single_pkt_solicit_port(ndev, i); + config_pkt_solicit_port(ndev, i); } /** * enable_nps_interrupts - enable NPS interrutps - * @ndev: N5 device. + * @ndev: NITROX device. * * This includes NPS core, packet in and slc interrupts. 
*/ @@ -284,8 +305,8 @@ void nitrox_config_pom_unit(struct nitrox_device *ndev) } /** - * nitrox_config_rand_unit - enable N5 random number unit - * @ndev: N5 device + * nitrox_config_rand_unit - enable NITROX random number unit + * @ndev: NITROX device */ void nitrox_config_rand_unit(struct nitrox_device *ndev) { @@ -361,6 +382,7 @@ void invalidate_lbc(struct nitrox_device *ndev) { union lbc_inval_ctl lbc_ctl; union lbc_inval_status lbc_stat; + int max_retries = MAX_CSR_RETRIES; u64 offset; /* invalidate LBC */ @@ -370,10 +392,12 @@ void invalidate_lbc(struct nitrox_device *ndev) nitrox_write_csr(ndev, offset, lbc_ctl.value); offset = LBC_INVAL_STATUS; - do { lbc_stat.value = nitrox_read_csr(ndev, offset); - } while (!lbc_stat.s.done); + if (lbc_stat.s.done) + break; + udelay(50); + } while (max_retries--); } void nitrox_config_lbc_unit(struct nitrox_device *ndev) @@ -467,3 +491,31 @@ void nitrox_get_hwinfo(struct nitrox_device *ndev) /* copy partname */ strncpy(ndev->hw.partname, name, sizeof(ndev->hw.partname)); } + +void enable_pf2vf_mbox_interrupts(struct nitrox_device *ndev) +{ + u64 value = ~0ULL; + u64 reg_addr; + + /* Mailbox interrupt low enable set register */ + reg_addr = NPS_PKT_MBOX_INT_LO_ENA_W1S; + nitrox_write_csr(ndev, reg_addr, value); + + /* Mailbox interrupt high enable set register */ + reg_addr = NPS_PKT_MBOX_INT_HI_ENA_W1S; + nitrox_write_csr(ndev, reg_addr, value); +} + +void disable_pf2vf_mbox_interrupts(struct nitrox_device *ndev) +{ + u64 value = ~0ULL; + u64 reg_addr; + + /* Mailbox interrupt low enable clear register */ + reg_addr = NPS_PKT_MBOX_INT_LO_ENA_W1C; + nitrox_write_csr(ndev, reg_addr, value); + + /* Mailbox interrupt high enable clear register */ + reg_addr = NPS_PKT_MBOX_INT_HI_ENA_W1C; + nitrox_write_csr(ndev, reg_addr, value); +} diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.h b/drivers/crypto/cavium/nitrox/nitrox_hal.h index 489ee64c119e..d6606418ba38 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_hal.h +++ b/drivers/crypto/cavium/nitrox/nitrox_hal.h @@ -19,5 +19,7 @@ void enable_pkt_input_ring(struct nitrox_device *ndev, int ring); void enable_pkt_solicit_port(struct nitrox_device *ndev, int port); void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode); void nitrox_get_hwinfo(struct nitrox_device *ndev); +void enable_pf2vf_mbox_interrupts(struct nitrox_device *ndev); +void disable_pf2vf_mbox_interrupts(struct nitrox_device *ndev); #endif /* __NITROX_HAL_H */ diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.c b/drivers/crypto/cavium/nitrox/nitrox_isr.c index 88a77b8fb3fb..3dec570a190a 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_isr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_isr.c @@ -7,12 +7,14 @@ #include "nitrox_csr.h" #include "nitrox_common.h" #include "nitrox_hal.h" +#include "nitrox_mbx.h" /** * One vector for each type of ring * - NPS packet ring, AQMQ ring and ZQMQ ring */ #define NR_RING_VECTORS 3 +#define NR_NON_RING_VECTORS 1 /* base entry for packet ring/port */ #define PKT_RING_MSIX_BASE 0 #define NON_RING_MSIX_BASE 192 @@ -219,7 +221,8 @@ static void nps_core_int_tasklet(unsigned long data) */ static irqreturn_t nps_core_int_isr(int irq, void *data) { - struct nitrox_device *ndev = data; + struct nitrox_q_vector *qvec = data; + struct nitrox_device *ndev = qvec->ndev; union nps_core_int_active core_int; core_int.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE); @@ -245,6 +248,10 @@ static irqreturn_t nps_core_int_isr(int irq, void *data) if (core_int.s.bmi) clear_bmi_err_intr(ndev); + /* 
Mailbox interrupt */ + if (core_int.s.mbox) + nitrox_pf2vf_mbox_handler(ndev); + /* If more work callback the ISR, set resend */ core_int.s.resend = 1; nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int.value); @@ -275,6 +282,7 @@ void nitrox_unregister_interrupts(struct nitrox_device *ndev) qvec->valid = false; } kfree(ndev->qvec); + ndev->qvec = NULL; pci_free_irq_vectors(pdev); } @@ -321,6 +329,7 @@ int nitrox_register_interrupts(struct nitrox_device *ndev) if (qvec->ring >= ndev->nr_queues) break; + qvec->cmdq = &ndev->pkt_inq[qvec->ring]; snprintf(qvec->name, IRQ_NAMESZ, "nitrox-pkt%d", qvec->ring); /* get the vector number */ vec = pci_irq_vector(pdev, i); @@ -335,13 +344,13 @@ int nitrox_register_interrupts(struct nitrox_device *ndev) tasklet_init(&qvec->resp_tasklet, pkt_slc_resp_tasklet, (unsigned long)qvec); - qvec->cmdq = &ndev->pkt_inq[qvec->ring]; qvec->valid = true; } /* request irqs for non ring vectors */ i = NON_RING_MSIX_BASE; qvec = &ndev->qvec[i]; + qvec->ndev = ndev; snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d", i); /* get the vector number */ @@ -356,7 +365,6 @@ int nitrox_register_interrupts(struct nitrox_device *ndev) tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet, (unsigned long)qvec); - qvec->ndev = ndev; qvec->valid = true; return 0; @@ -365,3 +373,81 @@ irq_fail: nitrox_unregister_interrupts(ndev); return ret; } + +void nitrox_sriov_unregister_interrupts(struct nitrox_device *ndev) +{ + struct pci_dev *pdev = ndev->pdev; + int i; + + for (i = 0; i < ndev->num_vecs; i++) { + struct nitrox_q_vector *qvec; + int vec; + + qvec = ndev->qvec + i; + if (!qvec->valid) + continue; + + vec = ndev->iov.msix.vector; + irq_set_affinity_hint(vec, NULL); + free_irq(vec, qvec); + + tasklet_disable(&qvec->resp_tasklet); + tasklet_kill(&qvec->resp_tasklet); + qvec->valid = false; + } + kfree(ndev->qvec); + ndev->qvec = NULL; + pci_disable_msix(pdev); +} + +int nitrox_sriov_register_interupts(struct nitrox_device *ndev) +{ + struct pci_dev *pdev = ndev->pdev; + struct nitrox_q_vector *qvec; + int vec, cpu; + int ret; + + /** + * only non ring vectors i.e Entry 192 is available + * for PF in SR-IOV mode. 
+ */ + ndev->iov.msix.entry = NON_RING_MSIX_BASE; + ret = pci_enable_msix_exact(pdev, &ndev->iov.msix, NR_NON_RING_VECTORS); + if (ret) { + dev_err(DEV(ndev), "failed to allocate nps-core-int%d\n", + NON_RING_MSIX_BASE); + return ret; + } + + qvec = kcalloc(NR_NON_RING_VECTORS, sizeof(*qvec), GFP_KERNEL); + if (!qvec) { + pci_disable_msix(pdev); + return -ENOMEM; + } + qvec->ndev = ndev; + + ndev->qvec = qvec; + ndev->num_vecs = NR_NON_RING_VECTORS; + snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d", + NON_RING_MSIX_BASE); + + vec = ndev->iov.msix.vector; + ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec); + if (ret) { + dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n", + NON_RING_MSIX_BASE); + goto iov_irq_fail; + } + cpu = num_online_cpus(); + irq_set_affinity_hint(vec, get_cpu_mask(cpu)); + + tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet, + (unsigned long)qvec); + qvec->valid = true; + + return 0; + +iov_irq_fail: + nitrox_sriov_unregister_interrupts(ndev); + return ret; +} diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.h b/drivers/crypto/cavium/nitrox/nitrox_isr.h index 63418a6cc52c..1062c9336c1f 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_isr.h +++ b/drivers/crypto/cavium/nitrox/nitrox_isr.h @@ -6,5 +6,7 @@ int nitrox_register_interrupts(struct nitrox_device *ndev); void nitrox_unregister_interrupts(struct nitrox_device *ndev); +int nitrox_sriov_register_interupts(struct nitrox_device *ndev); +void nitrox_sriov_unregister_interrupts(struct nitrox_device *ndev); #endif /* __NITROX_ISR_H */ diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c index 2260efa42308..9138bae12521 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_lib.c +++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c @@ -158,12 +158,19 @@ static void destroy_crypto_dma_pool(struct nitrox_device *ndev) void *crypto_alloc_context(struct nitrox_device *ndev) { struct ctx_hdr *ctx; + struct crypto_ctx_hdr *chdr; void *vaddr; dma_addr_t dma; + chdr = kmalloc(sizeof(*chdr), GFP_KERNEL); + if (!chdr) + return NULL; + vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma); - if (!vaddr) + if (!vaddr) { + kfree(chdr); return NULL; + } /* fill meta data */ ctx = vaddr; @@ -171,7 +178,11 @@ void *crypto_alloc_context(struct nitrox_device *ndev) ctx->dma = dma; ctx->ctx_dma = dma + sizeof(struct ctx_hdr); - return ((u8 *)vaddr + sizeof(struct ctx_hdr)); + chdr->pool = ndev->ctx_pool; + chdr->dma = dma; + chdr->vaddr = vaddr; + + return chdr; } /** @@ -180,13 +191,14 @@ void *crypto_alloc_context(struct nitrox_device *ndev) */ void crypto_free_context(void *ctx) { - struct ctx_hdr *ctxp; + struct crypto_ctx_hdr *ctxp; if (!ctx) return; - ctxp = (struct ctx_hdr *)((u8 *)ctx - sizeof(struct ctx_hdr)); - dma_pool_free(ctxp->pool, ctxp, ctxp->dma); + ctxp = ctx; + dma_pool_free(ctxp->pool, ctxp->vaddr, ctxp->dma); + kfree(ctxp); } /** diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c index 6595c95af9f1..014e9863c20e 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_main.c +++ b/drivers/crypto/cavium/nitrox/nitrox_main.c @@ -1,6 +1,5 @@ #include <linux/aer.h> #include <linux/delay.h> -#include <linux/debugfs.h> #include <linux/firmware.h> #include <linux/list.h> #include <linux/module.h> @@ -13,9 +12,9 @@ #include "nitrox_csr.h" #include "nitrox_hal.h" #include "nitrox_isr.h" +#include "nitrox_debugfs.h" #define CNN55XX_DEV_ID 0x12 -#define MAX_PF_QUEUES 64 #define UCODE_HLEN 48 #define SE_GROUP 0 diff 
--git a/drivers/crypto/cavium/nitrox/nitrox_mbx.c b/drivers/crypto/cavium/nitrox/nitrox_mbx.c new file mode 100644 index 000000000000..02ee95064841 --- /dev/null +++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.c @@ -0,0 +1,204 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/workqueue.h> + +#include "nitrox_csr.h" +#include "nitrox_hal.h" +#include "nitrox_dev.h" + +#define RING_TO_VFNO(_x, _y) ((_x) / (_y)) + +/** + * mbx_msg_type - Mailbox message types + */ +enum mbx_msg_type { + MBX_MSG_TYPE_NOP, + MBX_MSG_TYPE_REQ, + MBX_MSG_TYPE_ACK, + MBX_MSG_TYPE_NACK, +}; + +/** + * mbx_msg_opcode - Mailbox message opcodes + */ +enum mbx_msg_opcode { + MSG_OP_VF_MODE = 1, + MSG_OP_VF_UP, + MSG_OP_VF_DOWN, + MSG_OP_CHIPID_VFID, +}; + +struct pf2vf_work { + struct nitrox_vfdev *vfdev; + struct nitrox_device *ndev; + struct work_struct pf2vf_resp; +}; + +static inline u64 pf2vf_read_mbox(struct nitrox_device *ndev, int ring) +{ + u64 reg_addr; + + reg_addr = NPS_PKT_MBOX_VF_PF_PFDATAX(ring); + return nitrox_read_csr(ndev, reg_addr); +} + +static inline void pf2vf_write_mbox(struct nitrox_device *ndev, u64 value, + int ring) +{ + u64 reg_addr; + + reg_addr = NPS_PKT_MBOX_PF_VF_PFDATAX(ring); + nitrox_write_csr(ndev, reg_addr, value); +} + +static void pf2vf_send_response(struct nitrox_device *ndev, + struct nitrox_vfdev *vfdev) +{ + union mbox_msg msg; + + msg.value = vfdev->msg.value; + + switch (vfdev->msg.opcode) { + case MSG_OP_VF_MODE: + msg.data = ndev->mode; + break; + case MSG_OP_VF_UP: + vfdev->nr_queues = vfdev->msg.data; + atomic_set(&vfdev->state, __NDEV_READY); + break; + case MSG_OP_CHIPID_VFID: + msg.id.chipid = ndev->idx; + msg.id.vfid = vfdev->vfno; + break; + case MSG_OP_VF_DOWN: + vfdev->nr_queues = 0; + atomic_set(&vfdev->state, __NDEV_NOT_READY); + break; + default: + msg.type = MBX_MSG_TYPE_NOP; + break; + } + + if (msg.type == MBX_MSG_TYPE_NOP) + return; + + /* send ACK to VF */ + msg.type = MBX_MSG_TYPE_ACK; + pf2vf_write_mbox(ndev, msg.value, vfdev->ring); + + vfdev->msg.value = 0; + atomic64_inc(&vfdev->mbx_resp); +} + +static void pf2vf_resp_handler(struct work_struct *work) +{ + struct pf2vf_work *pf2vf_resp = container_of(work, struct pf2vf_work, + pf2vf_resp); + struct nitrox_vfdev *vfdev = pf2vf_resp->vfdev; + struct nitrox_device *ndev = pf2vf_resp->ndev; + + switch (vfdev->msg.type) { + case MBX_MSG_TYPE_REQ: + /* process the request from VF */ + pf2vf_send_response(ndev, vfdev); + break; + case MBX_MSG_TYPE_ACK: + case MBX_MSG_TYPE_NACK: + break; + }; + + kfree(pf2vf_resp); +} + +void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev) +{ + struct nitrox_vfdev *vfdev; + struct pf2vf_work *pfwork; + u64 value, reg_addr; + u32 i; + int vfno; + + /* loop for VF(0..63) */ + reg_addr = NPS_PKT_MBOX_INT_LO; + value = nitrox_read_csr(ndev, reg_addr); + for_each_set_bit(i, (const unsigned long *)&value, BITS_PER_LONG) { + /* get the vfno from ring */ + vfno = RING_TO_VFNO(i, ndev->iov.max_vf_queues); + vfdev = ndev->iov.vfdev + vfno; + vfdev->ring = i; + /* fill the vf mailbox data */ + vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring); + pfwork = kzalloc(sizeof(*pfwork), GFP_ATOMIC); + if (!pfwork) + continue; + + pfwork->vfdev = vfdev; + pfwork->ndev = ndev; + INIT_WORK(&pfwork->pf2vf_resp, pf2vf_resp_handler); + queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp); + /* clear the corresponding vf bit */ + nitrox_write_csr(ndev, reg_addr, BIT_ULL(i)); + } + + /* loop for VF(64..127) */ + reg_addr = NPS_PKT_MBOX_INT_HI; + value = nitrox_read_csr(ndev, reg_addr); + 
for_each_set_bit(i, (const unsigned long *)&value, BITS_PER_LONG) { + /* get the vfno from ring */ + vfno = RING_TO_VFNO(i + 64, ndev->iov.max_vf_queues); + vfdev = ndev->iov.vfdev + vfno; + vfdev->ring = (i + 64); + /* fill the vf mailbox data */ + vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring); + + pfwork = kzalloc(sizeof(*pfwork), GFP_ATOMIC); + if (!pfwork) + continue; + + pfwork->vfdev = vfdev; + pfwork->ndev = ndev; + INIT_WORK(&pfwork->pf2vf_resp, pf2vf_resp_handler); + queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp); + /* clear the corresponding vf bit */ + nitrox_write_csr(ndev, reg_addr, BIT_ULL(i)); + } +} + +int nitrox_mbox_init(struct nitrox_device *ndev) +{ + struct nitrox_vfdev *vfdev; + int i; + + ndev->iov.vfdev = kcalloc(ndev->iov.num_vfs, + sizeof(struct nitrox_vfdev), GFP_KERNEL); + if (!ndev->iov.vfdev) + return -ENOMEM; + + for (i = 0; i < ndev->iov.num_vfs; i++) { + vfdev = ndev->iov.vfdev + i; + vfdev->vfno = i; + } + + /* allocate pf2vf response workqueue */ + ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", 0, 0); + if (!ndev->iov.pf2vf_wq) { + kfree(ndev->iov.vfdev); + return -ENOMEM; + } + /* enable pf2vf mailbox interrupts */ + enable_pf2vf_mbox_interrupts(ndev); + + return 0; +} + +void nitrox_mbox_cleanup(struct nitrox_device *ndev) +{ + /* disable pf2vf mailbox interrupts */ + disable_pf2vf_mbox_interrupts(ndev); + /* destroy workqueue */ + if (ndev->iov.pf2vf_wq) + destroy_workqueue(ndev->iov.pf2vf_wq); + + kfree(ndev->iov.vfdev); + ndev->iov.pf2vf_wq = NULL; + ndev->iov.vfdev = NULL; +} diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.h b/drivers/crypto/cavium/nitrox/nitrox_mbx.h new file mode 100644 index 000000000000..5008399775a9 --- /dev/null +++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.h @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 +#ifndef __NITROX_MBX_H +#define __NITROX_MBX_H + +int nitrox_mbox_init(struct nitrox_device *ndev); +void nitrox_mbox_cleanup(struct nitrox_device *ndev); +void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev); + +#endif /* __NITROX_MBX_H */ diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h index d091b6f5f5dd..76c0f0be7233 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_req.h +++ b/drivers/crypto/cavium/nitrox/nitrox_req.h @@ -7,6 +7,9 @@ #include "nitrox_dev.h" +#define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL +#define PRIO 4001 + /** * struct gphdr - General purpose Header * @param0: first parameter. @@ -46,13 +49,6 @@ union se_req_ctrl { } s; }; -struct nitrox_sglist { - u16 len; - u16 raz0; - u32 raz1; - dma_addr_t dma; -}; - #define MAX_IV_LEN 16 /** @@ -62,8 +58,10 @@ struct nitrox_sglist { * @ctx_handle: Crypto context handle. * @gph: GP Header * @ctrl: Request Information. 
- * @in: Input sglist - * @out: Output sglist + * @orh: ORH address + * @comp: completion address + * @src: Input sglist + * @dst: Output sglist */ struct se_crypto_request { u8 opcode; @@ -73,9 +71,8 @@ struct se_crypto_request { struct gphdr gph; union se_req_ctrl ctrl; - - u8 iv[MAX_IV_LEN]; - u16 ivsize; + u64 *orh; + u64 *comp; struct scatterlist *src; struct scatterlist *dst; @@ -110,6 +107,18 @@ enum flexi_cipher { CIPHER_INVALID }; +enum flexi_auth { + AUTH_NULL = 0, + AUTH_MD5, + AUTH_SHA1, + AUTH_SHA2_SHA224, + AUTH_SHA2_SHA256, + AUTH_SHA2_SHA384, + AUTH_SHA2_SHA512, + AUTH_GMAC, + AUTH_INVALID +}; + /** * struct crypto_keys - Crypto keys * @key: Encryption key or KEY1 for AES-XTS @@ -136,6 +145,32 @@ struct auth_keys { u8 opad[64]; }; +union fc_ctx_flags { + __be64 f; + struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 cipher_type : 4; + u64 reserved_59 : 1; + u64 aes_keylen : 2; + u64 iv_source : 1; + u64 hash_type : 4; + u64 reserved_49_51 : 3; + u64 auth_input_type: 1; + u64 mac_len : 8; + u64 reserved_0_39 : 40; +#else + u64 reserved_0_39 : 40; + u64 mac_len : 8; + u64 auth_input_type: 1; + u64 reserved_49_51 : 3; + u64 hash_type : 4; + u64 iv_source : 1; + u64 aes_keylen : 2; + u64 reserved_59 : 1; + u64 cipher_type : 4; +#endif + } w0; +}; /** * struct flexi_crypto_context - Crypto context * @cipher_type: Encryption cipher type @@ -150,49 +185,30 @@ struct auth_keys { * @auth: Authentication keys */ struct flexi_crypto_context { - union { - __be64 flags; - struct { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 cipher_type : 4; - u64 reserved_59 : 1; - u64 aes_keylen : 2; - u64 iv_source : 1; - u64 hash_type : 4; - u64 reserved_49_51 : 3; - u64 auth_input_type: 1; - u64 mac_len : 8; - u64 reserved_0_39 : 40; -#else - u64 reserved_0_39 : 40; - u64 mac_len : 8; - u64 auth_input_type: 1; - u64 reserved_49_51 : 3; - u64 hash_type : 4; - u64 iv_source : 1; - u64 aes_keylen : 2; - u64 reserved_59 : 1; - u64 cipher_type : 4; -#endif - } w0; - }; - + union fc_ctx_flags flags; struct crypto_keys crypto; struct auth_keys auth; }; +struct crypto_ctx_hdr { + struct dma_pool *pool; + dma_addr_t dma; + void *vaddr; +}; + struct nitrox_crypto_ctx { struct nitrox_device *ndev; union { u64 ctx_handle; struct flexi_crypto_context *fctx; } u; + struct crypto_ctx_hdr *chdr; }; struct nitrox_kcrypt_request { struct se_crypto_request creq; - struct nitrox_crypto_ctx *nctx; - struct skcipher_request *skreq; + u8 *src; + u8 *dst; }; /** @@ -369,26 +385,19 @@ struct nitrox_sgcomp { /* * strutct nitrox_sgtable - SG list information - * @map_cnt: Number of buffers mapped - * @nr_comp: Number of sglist components + * @sgmap_cnt: Number of buffers mapped * @total_bytes: Total bytes in sglist. - * @len: Total sglist components length. - * @dma: DMA address of sglist component. - * @dir: DMA direction. - * @buf: crypto request buffer. - * @sglist: SG list of input/output buffers. + * @sgcomp_len: Total sglist components length. + * @sgcomp_dma: DMA address of sglist component. + * @sg: crypto request buffer. * @sgcomp: sglist component for NITROX. 
*/ struct nitrox_sgtable { - u8 map_bufs_cnt; - u8 nr_sgcomp; + u8 sgmap_cnt; u16 total_bytes; - u32 len; - dma_addr_t dma; - enum dma_data_direction dir; - - struct scatterlist *buf; - struct nitrox_sglist *sglist; + u32 sgcomp_len; + dma_addr_t sgcomp_dma; + struct scatterlist *sg; struct nitrox_sgcomp *sgcomp; }; @@ -398,13 +407,11 @@ struct nitrox_sgtable { #define COMP_HLEN 8 struct resp_hdr { - u64 orh; - dma_addr_t orh_dma; - u64 completion; - dma_addr_t completion_dma; + u64 *orh; + u64 *completion; }; -typedef void (*completion_t)(struct skcipher_request *skreq, int err); +typedef void (*completion_t)(void *arg, int err); /** * struct nitrox_softreq - Represents the NIROX Request. @@ -427,7 +434,6 @@ struct nitrox_softreq { u32 flags; gfp_t gfp; atomic_t status; - bool inplace; struct nitrox_device *ndev; struct nitrox_cmdq *cmdq; @@ -440,7 +446,201 @@ struct nitrox_softreq { unsigned long tstamp; completion_t callback; - struct skcipher_request *skreq; + void *cb_arg; }; +static inline int flexi_aes_keylen(int keylen) +{ + int aes_keylen; + + switch (keylen) { + case AES_KEYSIZE_128: + aes_keylen = 1; + break; + case AES_KEYSIZE_192: + aes_keylen = 2; + break; + case AES_KEYSIZE_256: + aes_keylen = 3; + break; + default: + aes_keylen = -EINVAL; + break; + } + return aes_keylen; +} + +static inline void *alloc_req_buf(int nents, int extralen, gfp_t gfp) +{ + size_t size; + + size = sizeof(struct scatterlist) * nents; + size += extralen; + + return kzalloc(size, gfp); +} + +/** + * create_single_sg - Point SG entry to the data + * @sg: Destination SG list + * @buf: Data + * @buflen: Data length + * + * Returns next free entry in the destination SG list + **/ +static inline struct scatterlist *create_single_sg(struct scatterlist *sg, + void *buf, int buflen) +{ + sg_set_buf(sg, buf, buflen); + sg++; + return sg; +} + +/** + * create_multi_sg - Create multiple sg entries with buflen data length from + * source sglist + * @to_sg: Destination SG list + * @from_sg: Source SG list + * @buflen: Data length + * + * Returns next free entry in the destination SG list + **/ +static inline struct scatterlist *create_multi_sg(struct scatterlist *to_sg, + struct scatterlist *from_sg, + int buflen) +{ + struct scatterlist *sg = to_sg; + unsigned int sglen; + + for (; buflen; buflen -= sglen) { + sglen = from_sg->length; + if (sglen > buflen) + sglen = buflen; + + sg_set_buf(sg, sg_virt(from_sg), sglen); + from_sg = sg_next(from_sg); + sg++; + } + + return sg; +} + +static inline void set_orh_value(u64 *orh) +{ + WRITE_ONCE(*orh, PENDING_SIG); +} + +static inline void set_comp_value(u64 *comp) +{ + WRITE_ONCE(*comp, PENDING_SIG); +} + +static inline int alloc_src_req_buf(struct nitrox_kcrypt_request *nkreq, + int nents, int ivsize) +{ + struct se_crypto_request *creq = &nkreq->creq; + + nkreq->src = alloc_req_buf(nents, ivsize, creq->gfp); + if (!nkreq->src) + return -ENOMEM; + + return 0; +} + +static inline void nitrox_creq_copy_iv(char *dst, char *src, int size) +{ + memcpy(dst, src, size); +} + +static inline struct scatterlist *nitrox_creq_src_sg(char *iv, int ivsize) +{ + return (struct scatterlist *)(iv + ivsize); +} + +static inline void nitrox_creq_set_src_sg(struct nitrox_kcrypt_request *nkreq, + int nents, int ivsize, + struct scatterlist *src, int buflen) +{ + char *iv = nkreq->src; + struct scatterlist *sg; + struct se_crypto_request *creq = &nkreq->creq; + + creq->src = nitrox_creq_src_sg(iv, ivsize); + sg = creq->src; + sg_init_table(sg, nents); + + /* Input format: + * 
+----+----------------+ + * | IV | SRC sg entries | + * +----+----------------+ + */ + + /* IV */ + sg = create_single_sg(sg, iv, ivsize); + /* SRC entries */ + create_multi_sg(sg, src, buflen); +} + +static inline int alloc_dst_req_buf(struct nitrox_kcrypt_request *nkreq, + int nents) +{ + int extralen = ORH_HLEN + COMP_HLEN; + struct se_crypto_request *creq = &nkreq->creq; + + nkreq->dst = alloc_req_buf(nents, extralen, creq->gfp); + if (!nkreq->dst) + return -ENOMEM; + + return 0; +} + +static inline void nitrox_creq_set_orh(struct nitrox_kcrypt_request *nkreq) +{ + struct se_crypto_request *creq = &nkreq->creq; + + creq->orh = (u64 *)(nkreq->dst); + set_orh_value(creq->orh); +} + +static inline void nitrox_creq_set_comp(struct nitrox_kcrypt_request *nkreq) +{ + struct se_crypto_request *creq = &nkreq->creq; + + creq->comp = (u64 *)(nkreq->dst + ORH_HLEN); + set_comp_value(creq->comp); +} + +static inline struct scatterlist *nitrox_creq_dst_sg(char *dst) +{ + return (struct scatterlist *)(dst + ORH_HLEN + COMP_HLEN); +} + +static inline void nitrox_creq_set_dst_sg(struct nitrox_kcrypt_request *nkreq, + int nents, int ivsize, + struct scatterlist *dst, int buflen) +{ + struct se_crypto_request *creq = &nkreq->creq; + struct scatterlist *sg; + char *iv = nkreq->src; + + creq->dst = nitrox_creq_dst_sg(nkreq->dst); + sg = creq->dst; + sg_init_table(sg, nents); + + /* Output format: + * +-----+----+----------------+-----------------+ + * | ORH | IV | DST sg entries | COMPLETION Bytes| + * +-----+----+----------------+-----------------+ + */ + + /* ORH */ + sg = create_single_sg(sg, creq->orh, ORH_HLEN); + /* IV */ + sg = create_single_sg(sg, iv, ivsize); + /* DST entries */ + sg = create_multi_sg(sg, dst, buflen); + /* COMPLETION Bytes */ + create_single_sg(sg, creq->comp, COMP_HLEN); +} + #endif /* __NITROX_REQ_H */ diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c index 3987cd84c033..e34e4df8fd24 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c @@ -13,7 +13,6 @@ #define FDATA_SIZE 32 /* Base destination port for the solicited requests */ #define SOLICIT_BASE_DPORT 256 -#define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL #define REQ_NOT_POSTED 1 #define REQ_BACKLOG 2 @@ -52,58 +51,26 @@ static inline int incr_index(int index, int count, int max) return index; } -/** - * dma_free_sglist - unmap and free the sg lists. 
- * @ndev: N5 device
- * @sgtbl: SG table
- */
 static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
 {
 	struct nitrox_device *ndev = sr->ndev;
 	struct device *dev = DEV(ndev);
-	struct nitrox_sglist *sglist;
-
-	/* unmap in sgbuf */
-	sglist = sr->in.sglist;
-	if (!sglist)
-		goto out_unmap;
-
-	/* unmap iv */
-	dma_unmap_single(dev, sglist->dma, sglist->len, DMA_BIDIRECTIONAL);
-	/* unmap src sglist */
-	dma_unmap_sg(dev, sr->in.buf, (sr->in.map_bufs_cnt - 1), sr->in.dir);
-	/* unmap gather component */
-	dma_unmap_single(dev, sr->in.dma, sr->in.len, DMA_TO_DEVICE);
-	kfree(sr->in.sglist);
+
+
+	dma_unmap_sg(dev, sr->in.sg, sr->in.sgmap_cnt, DMA_BIDIRECTIONAL);
+	dma_unmap_single(dev, sr->in.sgcomp_dma, sr->in.sgcomp_len,
+			 DMA_TO_DEVICE);
 	kfree(sr->in.sgcomp);
-	sr->in.sglist = NULL;
-	sr->in.buf = NULL;
-	sr->in.map_bufs_cnt = 0;
-
-out_unmap:
-	/* unmap out sgbuf */
-	sglist = sr->out.sglist;
-	if (!sglist)
-		return;
-
-	/* unmap orh */
-	dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);
-
-	/* unmap dst sglist */
-	if (!sr->inplace) {
-		dma_unmap_sg(dev, sr->out.buf, (sr->out.map_bufs_cnt - 3),
-			     sr->out.dir);
-	}
-	/* unmap completion */
-	dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);
+	sr->in.sg = NULL;
+	sr->in.sgmap_cnt = 0;
 
-	/* unmap scatter component */
-	dma_unmap_single(dev, sr->out.dma, sr->out.len, DMA_TO_DEVICE);
-	kfree(sr->out.sglist);
+	dma_unmap_sg(dev, sr->out.sg, sr->out.sgmap_cnt,
+		     DMA_BIDIRECTIONAL);
+	dma_unmap_single(dev, sr->out.sgcomp_dma, sr->out.sgcomp_len,
+			 DMA_TO_DEVICE);
 	kfree(sr->out.sgcomp);
-	sr->out.sglist = NULL;
-	sr->out.buf = NULL;
-	sr->out.map_bufs_cnt = 0;
+	sr->out.sg = NULL;
+	sr->out.sgmap_cnt = 0;
 }
 
 static void softreq_destroy(struct nitrox_softreq *sr)
@@ -116,7 +83,7 @@ static void softreq_destroy(struct nitrox_softreq *sr)
 * create_sg_component - create SG components for N5 device.
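 *
 * (Packing example: each component holds up to four DMA-mapped entries
 *  as big-endian (length, address) pairs, so map_nents = 6 entries need
 *  roundup(6, 4) / 4 = 2 components, the second one half filled.)
 *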
* @sr: Request structure * @sgtbl: SG table - * @nr_comp: total number of components required + * @map_nents: number of dma mapped entries * * Component structure * @@ -140,7 +107,7 @@ static int create_sg_component(struct nitrox_softreq *sr, { struct nitrox_device *ndev = sr->ndev; struct nitrox_sgcomp *sgcomp; - struct nitrox_sglist *sglist; + struct scatterlist *sg; dma_addr_t dma; size_t sz_comp; int i, j, nr_sgcomp; @@ -154,17 +121,15 @@ static int create_sg_component(struct nitrox_softreq *sr, return -ENOMEM; sgtbl->sgcomp = sgcomp; - sgtbl->nr_sgcomp = nr_sgcomp; - sglist = sgtbl->sglist; + sg = sgtbl->sg; /* populate device sg component */ for (i = 0; i < nr_sgcomp; i++) { - for (j = 0; j < 4; j++) { - sgcomp->len[j] = cpu_to_be16(sglist->len); - sgcomp->dma[j] = cpu_to_be64(sglist->dma); - sglist++; + for (j = 0; j < 4 && sg; j++) { + sgcomp[i].len[j] = cpu_to_be16(sg_dma_len(sg)); + sgcomp[i].dma[j] = cpu_to_be64(sg_dma_address(sg)); + sg = sg_next(sg); } - sgcomp++; } /* map the device sg component */ dma = dma_map_single(DEV(ndev), sgtbl->sgcomp, sz_comp, DMA_TO_DEVICE); @@ -174,8 +139,8 @@ static int create_sg_component(struct nitrox_softreq *sr, return -ENOMEM; } - sgtbl->dma = dma; - sgtbl->len = sz_comp; + sgtbl->sgcomp_dma = dma; + sgtbl->sgcomp_len = sz_comp; return 0; } @@ -193,66 +158,27 @@ static int dma_map_inbufs(struct nitrox_softreq *sr, { struct device *dev = DEV(sr->ndev); struct scatterlist *sg = req->src; - struct nitrox_sglist *glist; int i, nents, ret = 0; - dma_addr_t dma; - size_t sz; - nents = sg_nents(req->src); + nents = dma_map_sg(dev, req->src, sg_nents(req->src), + DMA_BIDIRECTIONAL); + if (!nents) + return -EINVAL; - /* creater gather list IV and src entries */ - sz = roundup((1 + nents), 4) * sizeof(*glist); - glist = kzalloc(sz, sr->gfp); - if (!glist) - return -ENOMEM; + for_each_sg(req->src, sg, nents, i) + sr->in.total_bytes += sg_dma_len(sg); - sr->in.sglist = glist; - /* map IV */ - dma = dma_map_single(dev, &req->iv, req->ivsize, DMA_BIDIRECTIONAL); - if (dma_mapping_error(dev, dma)) { - ret = -EINVAL; - goto iv_map_err; - } - - sr->in.dir = (req->src == req->dst) ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE; - /* map src entries */ - nents = dma_map_sg(dev, req->src, nents, sr->in.dir); - if (!nents) { - ret = -EINVAL; - goto src_map_err; - } - sr->in.buf = req->src; - - /* store the mappings */ - glist->len = req->ivsize; - glist->dma = dma; - glist++; - sr->in.total_bytes += req->ivsize; - - for_each_sg(req->src, sg, nents, i) { - glist->len = sg_dma_len(sg); - glist->dma = sg_dma_address(sg); - sr->in.total_bytes += glist->len; - glist++; - } - /* roundup map count to align with entires in sg component */ - sr->in.map_bufs_cnt = (1 + nents); - - /* create NITROX gather component */ - ret = create_sg_component(sr, &sr->in, sr->in.map_bufs_cnt); + sr->in.sg = req->src; + sr->in.sgmap_cnt = nents; + ret = create_sg_component(sr, &sr->in, sr->in.sgmap_cnt); if (ret) goto incomp_err; return 0; incomp_err: - dma_unmap_sg(dev, req->src, nents, sr->in.dir); - sr->in.map_bufs_cnt = 0; -src_map_err: - dma_unmap_single(dev, dma, req->ivsize, DMA_BIDIRECTIONAL); -iv_map_err: - kfree(sr->in.sglist); - sr->in.sglist = NULL; + dma_unmap_sg(dev, req->src, nents, DMA_BIDIRECTIONAL); + sr->in.sgmap_cnt = 0; return ret; } @@ -260,104 +186,25 @@ static int dma_map_outbufs(struct nitrox_softreq *sr, struct se_crypto_request *req) { struct device *dev = DEV(sr->ndev); - struct nitrox_sglist *glist = sr->in.sglist; - struct nitrox_sglist *slist; - struct scatterlist *sg; - int i, nents, map_bufs_cnt, ret = 0; - size_t sz; - - nents = sg_nents(req->dst); - - /* create scatter list ORH, IV, dst entries and Completion header */ - sz = roundup((3 + nents), 4) * sizeof(*slist); - slist = kzalloc(sz, sr->gfp); - if (!slist) - return -ENOMEM; - - sr->out.sglist = slist; - sr->out.dir = DMA_BIDIRECTIONAL; - /* map ORH */ - sr->resp.orh_dma = dma_map_single(dev, &sr->resp.orh, ORH_HLEN, - sr->out.dir); - if (dma_mapping_error(dev, sr->resp.orh_dma)) { - ret = -EINVAL; - goto orh_map_err; - } + int nents, ret = 0; - /* map completion */ - sr->resp.completion_dma = dma_map_single(dev, &sr->resp.completion, - COMP_HLEN, sr->out.dir); - if (dma_mapping_error(dev, sr->resp.completion_dma)) { - ret = -EINVAL; - goto compl_map_err; - } + nents = dma_map_sg(dev, req->dst, sg_nents(req->dst), + DMA_BIDIRECTIONAL); + if (!nents) + return -EINVAL; - sr->inplace = (req->src == req->dst) ? 
true : false; - /* out place */ - if (!sr->inplace) { - nents = dma_map_sg(dev, req->dst, nents, sr->out.dir); - if (!nents) { - ret = -EINVAL; - goto dst_map_err; - } - } - sr->out.buf = req->dst; - - /* store the mappings */ - /* orh */ - slist->len = ORH_HLEN; - slist->dma = sr->resp.orh_dma; - slist++; - - /* copy the glist mappings */ - if (sr->inplace) { - nents = sr->in.map_bufs_cnt - 1; - map_bufs_cnt = sr->in.map_bufs_cnt; - while (map_bufs_cnt--) { - slist->len = glist->len; - slist->dma = glist->dma; - slist++; - glist++; - } - } else { - /* copy iv mapping */ - slist->len = glist->len; - slist->dma = glist->dma; - slist++; - /* copy remaining maps */ - for_each_sg(req->dst, sg, nents, i) { - slist->len = sg_dma_len(sg); - slist->dma = sg_dma_address(sg); - slist++; - } - } - - /* completion */ - slist->len = COMP_HLEN; - slist->dma = sr->resp.completion_dma; - - sr->out.map_bufs_cnt = (3 + nents); - - ret = create_sg_component(sr, &sr->out, sr->out.map_bufs_cnt); + sr->out.sg = req->dst; + sr->out.sgmap_cnt = nents; + ret = create_sg_component(sr, &sr->out, sr->out.sgmap_cnt); if (ret) goto outcomp_map_err; return 0; outcomp_map_err: - if (!sr->inplace) - dma_unmap_sg(dev, req->dst, nents, sr->out.dir); - sr->out.map_bufs_cnt = 0; - sr->out.buf = NULL; -dst_map_err: - dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir); - sr->resp.completion_dma = 0; -compl_map_err: - dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir); - sr->resp.orh_dma = 0; -orh_map_err: - kfree(sr->out.sglist); - sr->out.sglist = NULL; + dma_unmap_sg(dev, req->dst, nents, DMA_BIDIRECTIONAL); + sr->out.sgmap_cnt = 0; + sr->out.sg = NULL; return ret; } @@ -422,6 +269,8 @@ static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen) smp_mb__after_atomic(); return true; } + /* sync with other cpus */ + smp_mb__after_atomic(); return false; } @@ -477,8 +326,6 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq) spin_lock_bh(&cmdq->backlog_qlock); list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) { - struct skcipher_request *skreq; - /* submit until space available */ if (unlikely(cmdq_full(cmdq, ndev->qlen))) { ret = -ENOSPC; @@ -490,12 +337,8 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq) /* sync with other cpus */ smp_mb__after_atomic(); - skreq = sr->skreq; /* post the command */ post_se_instr(sr, cmdq); - - /* backlog requests are posted, wakeup with -EINPROGRESS */ - skcipher_request_complete(skreq, -EINPROGRESS); } spin_unlock_bh(&cmdq->backlog_qlock); @@ -518,7 +361,7 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr) } /* add to backlog list */ backlog_list_add(sr, cmdq); - return -EBUSY; + return -EINPROGRESS; } post_se_instr(sr, cmdq); @@ -535,7 +378,7 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr) int nitrox_process_se_request(struct nitrox_device *ndev, struct se_crypto_request *req, completion_t callback, - struct skcipher_request *skreq) + void *cb_arg) { struct nitrox_softreq *sr; dma_addr_t ctx_handle = 0; @@ -552,12 +395,12 @@ int nitrox_process_se_request(struct nitrox_device *ndev, sr->flags = req->flags; sr->gfp = req->gfp; sr->callback = callback; - sr->skreq = skreq; + sr->cb_arg = cb_arg; atomic_set(&sr->status, REQ_NOT_POSTED); - WRITE_ONCE(sr->resp.orh, PENDING_SIG); - WRITE_ONCE(sr->resp.completion, PENDING_SIG); + sr->resp.orh = req->orh; + sr->resp.completion = req->comp; ret = softreq_map_iobuf(sr, req); if (ret) { @@ -598,13 +441,13 @@ int nitrox_process_se_request(struct nitrox_device 
*ndev, /* fill the packet instruction */ /* word 0 */ - sr->instr.dptr0 = cpu_to_be64(sr->in.dma); + sr->instr.dptr0 = cpu_to_be64(sr->in.sgcomp_dma); /* word 1 */ sr->instr.ih.value = 0; sr->instr.ih.s.g = 1; - sr->instr.ih.s.gsz = sr->in.map_bufs_cnt; - sr->instr.ih.s.ssz = sr->out.map_bufs_cnt; + sr->instr.ih.s.gsz = sr->in.sgmap_cnt; + sr->instr.ih.s.ssz = sr->out.sgmap_cnt; sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr); sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes; sr->instr.ih.value = cpu_to_be64(sr->instr.ih.value); @@ -626,11 +469,11 @@ int nitrox_process_se_request(struct nitrox_device *ndev, /* word 4 */ sr->instr.slc.value[0] = 0; - sr->instr.slc.s.ssz = sr->out.map_bufs_cnt; + sr->instr.slc.s.ssz = sr->out.sgmap_cnt; sr->instr.slc.value[0] = cpu_to_be64(sr->instr.slc.value[0]); /* word 5 */ - sr->instr.slc.s.rptr = cpu_to_be64(sr->out.dma); + sr->instr.slc.s.rptr = cpu_to_be64(sr->out.sgcomp_dma); /* * No conversion for front data, @@ -664,6 +507,24 @@ void backlog_qflush_work(struct work_struct *work) post_backlog_cmds(cmdq); } +static bool sr_completed(struct nitrox_softreq *sr) +{ + u64 orh = READ_ONCE(*sr->resp.orh); + unsigned long timeout = jiffies + msecs_to_jiffies(1); + + if ((orh != PENDING_SIG) && (orh & 0xff)) + return true; + + while (READ_ONCE(*sr->resp.completion) == PENDING_SIG) { + if (time_after(jiffies, timeout)) { + pr_err("comp not done\n"); + return false; + } + } + + return true; +} + /** * process_request_list - process completed requests * @ndev: N5 device @@ -675,8 +536,6 @@ static void process_response_list(struct nitrox_cmdq *cmdq) { struct nitrox_device *ndev = cmdq->ndev; struct nitrox_softreq *sr; - struct skcipher_request *skreq; - completion_t callback; int req_completed = 0, err = 0, budget; /* check all pending requests */ @@ -691,13 +550,13 @@ static void process_response_list(struct nitrox_cmdq *cmdq) break; /* check orh and completion bytes updates */ - if (READ_ONCE(sr->resp.orh) == READ_ONCE(sr->resp.completion)) { + if (!sr_completed(sr)) { /* request not completed, check for timeout */ if (!cmd_timeout(sr->tstamp, ndev->timeout)) break; dev_err_ratelimited(DEV(ndev), "Request timeout, orh 0x%016llx\n", - READ_ONCE(sr->resp.orh)); + READ_ONCE(*sr->resp.orh)); } atomic_dec(&cmdq->pending_count); atomic64_inc(&ndev->stats.completed); @@ -706,15 +565,12 @@ static void process_response_list(struct nitrox_cmdq *cmdq) /* remove from response list */ response_list_del(sr, cmdq); - callback = sr->callback; - skreq = sr->skreq; - /* ORH error code */ - err = READ_ONCE(sr->resp.orh) & 0xff; + err = READ_ONCE(*sr->resp.orh) & 0xff; softreq_destroy(sr); - if (callback) - callback(skreq, err); + if (sr->callback) + sr->callback(sr->cb_arg, err); req_completed++; } diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c new file mode 100644 index 000000000000..d4935d6cefdd --- /dev/null +++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c @@ -0,0 +1,498 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/crypto.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/printk.h> + +#include <crypto/aes.h> +#include <crypto/skcipher.h> +#include <crypto/ctr.h> +#include <crypto/des.h> +#include <crypto/xts.h> + +#include "nitrox_dev.h" +#include "nitrox_common.h" +#include "nitrox_req.h" + +struct nitrox_cipher { + const char *name; + enum flexi_cipher value; +}; + +/** + * supported cipher list + */ +static const struct nitrox_cipher flexi_cipher_table[] 
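/*
 * Name-to-engine-code map consumed by nitrox_skcipher_setkey() through
 * flexi_cipher_type(crypto_tfm_alg_name(tfm)); for example, "cbc(aes)"
 * resolves to CIPHER_AES_CBC, and any name not listed falls through to
 * the CIPHER_INVALID sentinel that terminates the table.
 */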
= { + { "null", CIPHER_NULL }, + { "cbc(des3_ede)", CIPHER_3DES_CBC }, + { "ecb(des3_ede)", CIPHER_3DES_ECB }, + { "cbc(aes)", CIPHER_AES_CBC }, + { "ecb(aes)", CIPHER_AES_ECB }, + { "cfb(aes)", CIPHER_AES_CFB }, + { "rfc3686(ctr(aes))", CIPHER_AES_CTR }, + { "xts(aes)", CIPHER_AES_XTS }, + { "cts(cbc(aes))", CIPHER_AES_CBC_CTS }, + { NULL, CIPHER_INVALID } +}; + +static enum flexi_cipher flexi_cipher_type(const char *name) +{ + const struct nitrox_cipher *cipher = flexi_cipher_table; + + while (cipher->name) { + if (!strcmp(cipher->name, name)) + break; + cipher++; + } + return cipher->value; +} + +static int nitrox_skcipher_init(struct crypto_skcipher *tfm) +{ + struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm); + struct crypto_ctx_hdr *chdr; + + /* get the first device */ + nctx->ndev = nitrox_get_first_device(); + if (!nctx->ndev) + return -ENODEV; + + /* allocate nitrox crypto context */ + chdr = crypto_alloc_context(nctx->ndev); + if (!chdr) { + nitrox_put_device(nctx->ndev); + return -ENOMEM; + } + nctx->chdr = chdr; + nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr + + sizeof(struct ctx_hdr)); + crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(tfm) + + sizeof(struct nitrox_kcrypt_request)); + return 0; +} + +static void nitrox_skcipher_exit(struct crypto_skcipher *tfm) +{ + struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm); + + /* free the nitrox crypto context */ + if (nctx->u.ctx_handle) { + struct flexi_crypto_context *fctx = nctx->u.fctx; + + memzero_explicit(&fctx->crypto, sizeof(struct crypto_keys)); + memzero_explicit(&fctx->auth, sizeof(struct auth_keys)); + crypto_free_context((void *)nctx->chdr); + } + nitrox_put_device(nctx->ndev); + + nctx->u.ctx_handle = 0; + nctx->ndev = NULL; +} + +static inline int nitrox_skcipher_setkey(struct crypto_skcipher *cipher, + int aes_keylen, const u8 *key, + unsigned int keylen) +{ + struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); + struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm); + struct flexi_crypto_context *fctx; + union fc_ctx_flags *flags; + enum flexi_cipher cipher_type; + const char *name; + + name = crypto_tfm_alg_name(tfm); + cipher_type = flexi_cipher_type(name); + if (unlikely(cipher_type == CIPHER_INVALID)) { + pr_err("unsupported cipher: %s\n", name); + return -EINVAL; + } + + /* fill crypto context */ + fctx = nctx->u.fctx; + flags = &fctx->flags; + flags->f = 0; + flags->w0.cipher_type = cipher_type; + flags->w0.aes_keylen = aes_keylen; + flags->w0.iv_source = IV_FROM_DPTR; + flags->f = cpu_to_be64(*(u64 *)&flags->w0); + /* copy the key to context */ + memcpy(fctx->crypto.u.key, key, keylen); + + return 0; +} + +static int nitrox_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, + unsigned int keylen) +{ + int aes_keylen; + + aes_keylen = flexi_aes_keylen(keylen); + if (aes_keylen < 0) { + crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); +} + +static int alloc_src_sglist(struct skcipher_request *skreq, int ivsize) +{ + struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); + int nents = sg_nents(skreq->src) + 1; + int ret; + + /* Allocate buffer to hold IV and input scatterlist array */ + ret = alloc_src_req_buf(nkreq, nents, ivsize); + if (ret) + return ret; + + nitrox_creq_copy_iv(nkreq->src, skreq->iv, ivsize); + nitrox_creq_set_src_sg(nkreq, nents, ivsize, skreq->src, + skreq->cryptlen); + + return 0; +} + +static int alloc_dst_sglist(struct skcipher_request 
*skreq, int ivsize) +{ + struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); + int nents = sg_nents(skreq->dst) + 3; + int ret; + + /* Allocate buffer to hold ORH, COMPLETION and output scatterlist + * array + */ + ret = alloc_dst_req_buf(nkreq, nents); + if (ret) + return ret; + + nitrox_creq_set_orh(nkreq); + nitrox_creq_set_comp(nkreq); + nitrox_creq_set_dst_sg(nkreq, nents, ivsize, skreq->dst, + skreq->cryptlen); + + return 0; +} + +static void free_src_sglist(struct skcipher_request *skreq) +{ + struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); + + kfree(nkreq->src); +} + +static void free_dst_sglist(struct skcipher_request *skreq) +{ + struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); + + kfree(nkreq->dst); +} + +static void nitrox_skcipher_callback(void *arg, int err) +{ + struct skcipher_request *skreq = arg; + + free_src_sglist(skreq); + free_dst_sglist(skreq); + if (err) { + pr_err_ratelimited("request failed status 0x%0x\n", err); + err = -EINVAL; + } + + skcipher_request_complete(skreq, err); +} + +static int nitrox_skcipher_crypt(struct skcipher_request *skreq, bool enc) +{ + struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(skreq); + struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(cipher); + struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); + int ivsize = crypto_skcipher_ivsize(cipher); + struct se_crypto_request *creq; + int ret; + + creq = &nkreq->creq; + creq->flags = skreq->base.flags; + creq->gfp = (skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC; + + /* fill the request */ + creq->ctrl.value = 0; + creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC; + creq->ctrl.s.arg = (enc ? ENCRYPT : DECRYPT); + /* param0: length of the data to be encrypted */ + creq->gph.param0 = cpu_to_be16(skreq->cryptlen); + creq->gph.param1 = 0; + /* param2: encryption data offset */ + creq->gph.param2 = cpu_to_be16(ivsize); + creq->gph.param3 = 0; + + creq->ctx_handle = nctx->u.ctx_handle; + creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context); + + ret = alloc_src_sglist(skreq, ivsize); + if (ret) + return ret; + + ret = alloc_dst_sglist(skreq, ivsize); + if (ret) { + free_src_sglist(skreq); + return ret; + } + + /* send the crypto request */ + return nitrox_process_se_request(nctx->ndev, creq, + nitrox_skcipher_callback, skreq); +} + +static int nitrox_aes_encrypt(struct skcipher_request *skreq) +{ + return nitrox_skcipher_crypt(skreq, true); +} + +static int nitrox_aes_decrypt(struct skcipher_request *skreq) +{ + return nitrox_skcipher_crypt(skreq, false); +} + +static int nitrox_3des_setkey(struct crypto_skcipher *cipher, + const u8 *key, unsigned int keylen) +{ + if (keylen != DES3_EDE_KEY_SIZE) { + crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + return nitrox_skcipher_setkey(cipher, 0, key, keylen); +} + +static int nitrox_3des_encrypt(struct skcipher_request *skreq) +{ + return nitrox_skcipher_crypt(skreq, true); +} + +static int nitrox_3des_decrypt(struct skcipher_request *skreq) +{ + return nitrox_skcipher_crypt(skreq, false); +} + +static int nitrox_aes_xts_setkey(struct crypto_skcipher *cipher, + const u8 *key, unsigned int keylen) +{ + struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); + struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm); + struct flexi_crypto_context *fctx; + int aes_keylen, ret; + + ret = xts_check_key(tfm, key, keylen); + if (ret) + return ret; + + keylen /= 2; + + aes_keylen = flexi_aes_keylen(keylen); + if (aes_keylen < 0) 
{ + crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + fctx = nctx->u.fctx; + /* copy KEY2 */ + memcpy(fctx->auth.u.key2, (key + keylen), keylen); + + return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); +} + +static int nitrox_aes_ctr_rfc3686_setkey(struct crypto_skcipher *cipher, + const u8 *key, unsigned int keylen) +{ + struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); + struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm); + struct flexi_crypto_context *fctx; + int aes_keylen; + + if (keylen < CTR_RFC3686_NONCE_SIZE) + return -EINVAL; + + fctx = nctx->u.fctx; + + memcpy(fctx->crypto.iv, key + (keylen - CTR_RFC3686_NONCE_SIZE), + CTR_RFC3686_NONCE_SIZE); + + keylen -= CTR_RFC3686_NONCE_SIZE; + + aes_keylen = flexi_aes_keylen(keylen); + if (aes_keylen < 0) { + crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); +} + +static struct skcipher_alg nitrox_skciphers[] = { { + .base = { + .cra_name = "cbc(aes)", + .cra_driver_name = "n5_cbc(aes)", + .cra_priority = PRIO, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = nitrox_aes_setkey, + .encrypt = nitrox_aes_encrypt, + .decrypt = nitrox_aes_decrypt, + .init = nitrox_skcipher_init, + .exit = nitrox_skcipher_exit, +}, { + .base = { + .cra_name = "ecb(aes)", + .cra_driver_name = "n5_ecb(aes)", + .cra_priority = PRIO, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = nitrox_aes_setkey, + .encrypt = nitrox_aes_encrypt, + .decrypt = nitrox_aes_decrypt, + .init = nitrox_skcipher_init, + .exit = nitrox_skcipher_exit, +}, { + .base = { + .cra_name = "cfb(aes)", + .cra_driver_name = "n5_cfb(aes)", + .cra_priority = PRIO, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = nitrox_aes_setkey, + .encrypt = nitrox_aes_encrypt, + .decrypt = nitrox_aes_decrypt, + .init = nitrox_skcipher_init, + .exit = nitrox_skcipher_exit, +}, { + .base = { + .cra_name = "xts(aes)", + .cra_driver_name = "n5_xts(aes)", + .cra_priority = PRIO, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + .min_keysize = 2 * AES_MIN_KEY_SIZE, + .max_keysize = 2 * AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = nitrox_aes_xts_setkey, + .encrypt = nitrox_aes_encrypt, + .decrypt = nitrox_aes_decrypt, + .init = nitrox_skcipher_init, + .exit = nitrox_skcipher_exit, +}, { + .base = { + .cra_name = "rfc3686(ctr(aes))", + .cra_driver_name = "n5_rfc3686(ctr(aes))", + .cra_priority = PRIO, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, + .max_keysize = 
AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, + .ivsize = CTR_RFC3686_IV_SIZE, + .init = nitrox_skcipher_init, + .exit = nitrox_skcipher_exit, + .setkey = nitrox_aes_ctr_rfc3686_setkey, + .encrypt = nitrox_aes_encrypt, + .decrypt = nitrox_aes_decrypt, +}, { + .base = { + .cra_name = "cts(cbc(aes))", + .cra_driver_name = "n5_cts(cbc(aes))", + .cra_priority = PRIO, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + }, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = nitrox_aes_setkey, + .encrypt = nitrox_aes_encrypt, + .decrypt = nitrox_aes_decrypt, + .init = nitrox_skcipher_init, + .exit = nitrox_skcipher_exit, +}, { + .base = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "n5_cbc(des3_ede)", + .cra_priority = PRIO, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setkey = nitrox_3des_setkey, + .encrypt = nitrox_3des_encrypt, + .decrypt = nitrox_3des_decrypt, + .init = nitrox_skcipher_init, + .exit = nitrox_skcipher_exit, +}, { + .base = { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "n5_ecb(des3_ede)", + .cra_priority = PRIO, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setkey = nitrox_3des_setkey, + .encrypt = nitrox_3des_encrypt, + .decrypt = nitrox_3des_decrypt, + .init = nitrox_skcipher_init, + .exit = nitrox_skcipher_exit, +} + +}; + +int nitrox_register_skciphers(void) +{ + return crypto_register_skciphers(nitrox_skciphers, + ARRAY_SIZE(nitrox_skciphers)); +} + +void nitrox_unregister_skciphers(void) +{ + crypto_unregister_skciphers(nitrox_skciphers, + ARRAY_SIZE(nitrox_skciphers)); +} diff --git a/drivers/crypto/cavium/nitrox/nitrox_sriov.c b/drivers/crypto/cavium/nitrox/nitrox_sriov.c index 30c0aa874583..bf439d8256ba 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_sriov.c +++ b/drivers/crypto/cavium/nitrox/nitrox_sriov.c @@ -6,7 +6,12 @@ #include "nitrox_hal.h" #include "nitrox_common.h" #include "nitrox_isr.h" +#include "nitrox_mbx.h" +/** + * num_vfs_valid - validate VF count + * @num_vfs: number of VF(s) + */ static inline bool num_vfs_valid(int num_vfs) { bool valid = false; @@ -48,7 +53,32 @@ static inline enum vf_mode num_vfs_to_mode(int num_vfs) return mode; } -static void pf_sriov_cleanup(struct nitrox_device *ndev) +static inline int vf_mode_to_nr_queues(enum vf_mode mode) +{ + int nr_queues = 0; + + switch (mode) { + case __NDEV_MODE_PF: + nr_queues = MAX_PF_QUEUES; + break; + case __NDEV_MODE_VF16: + nr_queues = 8; + break; + case __NDEV_MODE_VF32: + nr_queues = 4; + break; + case __NDEV_MODE_VF64: + nr_queues = 2; + break; + case __NDEV_MODE_VF128: + nr_queues = 1; + break; + } + + return nr_queues; +} + +static void nitrox_pf_cleanup(struct nitrox_device *ndev) { /* PF has no queues in SR-IOV mode */ atomic_set(&ndev->state, __NDEV_NOT_READY); @@ -60,7 +90,11 @@ static void pf_sriov_cleanup(struct nitrox_device *ndev) nitrox_common_sw_cleanup(ndev); } -static int 
pf_sriov_init(struct nitrox_device *ndev)
+/**
+ * nitrox_pf_reinit - re-initialize PF resources once SR-IOV is disabled
+ * @ndev: NITROX device
+ */
+static int nitrox_pf_reinit(struct nitrox_device *ndev)
 {
 	int err;
@@ -86,6 +120,33 @@ static int pf_sriov_init(struct nitrox_device *ndev)
 	return nitrox_crypto_register();
 }
 
+static void nitrox_sriov_cleanup(struct nitrox_device *ndev)
+{
+	/* unregister interrupts for PF in SR-IOV */
+	nitrox_sriov_unregister_interrupts(ndev);
+	nitrox_mbox_cleanup(ndev);
+}
+
+static int nitrox_sriov_init(struct nitrox_device *ndev)
+{
+	int ret;
+
+	/* register interrupts for PF in SR-IOV */
+	ret = nitrox_sriov_register_interupts(ndev);
+	if (ret)
+		return ret;
+
+	ret = nitrox_mbox_init(ndev);
+	if (ret)
+		goto sriov_init_fail;
+
+	return 0;
+
+sriov_init_fail:
+	nitrox_sriov_cleanup(ndev);
+	return ret;
+}
+
 static int nitrox_sriov_enable(struct pci_dev *pdev, int num_vfs)
 {
 	struct nitrox_device *ndev = pci_get_drvdata(pdev);
@@ -106,17 +167,32 @@ static int nitrox_sriov_enable(struct pci_dev *pdev, int num_vfs)
 	}
 	dev_info(DEV(ndev), "Enabled VF(s) %d\n", num_vfs);
 
-	ndev->num_vfs = num_vfs;
 	ndev->mode = num_vfs_to_mode(num_vfs);
+	ndev->iov.num_vfs = num_vfs;
+	ndev->iov.max_vf_queues = vf_mode_to_nr_queues(ndev->mode);
 	/* set bit in flags */
 	set_bit(__NDEV_SRIOV_BIT, &ndev->flags);
 
 	/* cleanup PF resources */
-	pf_sriov_cleanup(ndev);
+	nitrox_pf_cleanup(ndev);
 
-	config_nps_core_vfcfg_mode(ndev, ndev->mode);
+	/* PF SR-IOV mode initialization */
+	err = nitrox_sriov_init(ndev);
+	if (err)
+		goto iov_fail;
 
+	config_nps_core_vfcfg_mode(ndev, ndev->mode);
 	return num_vfs;
+
+iov_fail:
+	pci_disable_sriov(pdev);
+	/* clear bit in flags */
+	clear_bit(__NDEV_SRIOV_BIT, &ndev->flags);
+	ndev->iov.num_vfs = 0;
+	ndev->mode = __NDEV_MODE_PF;
+	/* reset back to working mode in PF */
+	nitrox_pf_reinit(ndev);
+	return err;
 }
 
 static int nitrox_sriov_disable(struct pci_dev *pdev)
@@ -134,12 +210,16 @@ static int nitrox_sriov_disable(struct pci_dev *pdev)
 	/* clear bit in flags */
 	clear_bit(__NDEV_SRIOV_BIT, &ndev->flags);
 
-	ndev->num_vfs = 0;
+	ndev->iov.num_vfs = 0;
+	ndev->iov.max_vf_queues = 0;
 	ndev->mode = __NDEV_MODE_PF;
 
+	/* cleanup PF SR-IOV resources */
+	nitrox_sriov_cleanup(ndev);
+
 	config_nps_core_vfcfg_mode(ndev, ndev->mode);
 
-	return pf_sriov_init(ndev);
+	return nitrox_pf_reinit(ndev);
 }
 
 int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs)
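/*
 * A minimal sketch of the dispatch such a .sriov_configure callback
 * performs, assuming the standard PCI convention that num_vfs == 0
 * requests disable; illustrative only, as the function body falls
 * outside this hunk:
 *
 *	if (!num_vfs)
 *		return nitrox_sriov_disable(pdev);
 *
 *	return nitrox_sriov_enable(pdev, num_vfs);
 */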