Diffstat (limited to 'drivers/crypto')
41 files changed, 1439 insertions, 420 deletions
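The hunks below add, among other things, caam/caam-qi2 support for the rfc7539(chacha20,poly1305) AEAD. For orientation only (not part of the patch), here is a minimal sketch of how a kernel user could drive such an AEAD through the generic crypto API once these algorithms are registered. The function name chachapoly_demo, the zeroed key/nonce and the buffer lengths are placeholders invented for the example; the API calls themselves (crypto_alloc_aead(), aead_request_set_ad(), crypto_wait_req(), ...) are the standard kernel AEAD interface.

/*
 * Illustrative sketch only -- not part of this patch. Exercises the
 * rfc7539(chacha20,poly1305) AEAD via the kernel crypto API; the CAAM
 * implementation added below is picked when it wins priority selection.
 */
#include <crypto/aead.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int chachapoly_demo(void)
{
	u8 key[32] = { 0 };		/* CHACHA_KEY_SIZE, placeholder value */
	u8 iv[12] = { 0 };		/* CHACHAPOLY_IV_SIZE nonce, placeholder */
	unsigned int assoclen = 16, ptlen = 64;
	struct crypto_aead *tfm;
	struct aead_request *req = NULL;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 *buf = NULL;
	int err;

	tfm = crypto_alloc_aead("rfc7539(chacha20,poly1305)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, sizeof(key));
	if (!err)
		err = crypto_aead_setauthsize(tfm, 16); /* POLY1305_DIGEST_SIZE */
	if (err)
		goto out_free_tfm;

	/* One buffer holding AD || plaintext, with room for the 16-byte tag */
	buf = kzalloc(assoclen + ptlen + 16, GFP_KERNEL);
	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!buf || !req) {
		err = -ENOMEM;
		goto out_free;
	}

	sg_init_one(&sg, buf, assoclen + ptlen + 16);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);

	/* Synchronously wait for the (possibly hardware-offloaded) request */
	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

out_free:
	aead_request_free(req);
	kfree(buf);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}

The same pattern applies to the rfc7539esp variant added here, except that the IV is 8 bytes and the remaining 4 bytes of nonce (the salt) are appended to the 32-byte key passed to crypto_aead_setkey(), as chachapoly_setkey() in the diff expects.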
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index caa98a7fe392..9f1baaec385f 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -762,10 +762,12 @@ config CRYPTO_DEV_CCREE select CRYPTO_ECB select CRYPTO_CTR select CRYPTO_XTS + select CRYPTO_SM4 + select CRYPTO_SM3 help Say 'Y' to enable a driver for the REE interface of the Arm TrustZone CryptoCell family of processors. Currently the - CryptoCell 712, 710 and 630 are supported. + CryptoCell 713, 712, 710 and 630 are supported. Choose this if you wish to use hardware acceleration of cryptographic operations on the system REE. If unsure say Y. diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c index f5c07498ea4f..4092c2aad8e2 100644 --- a/drivers/crypto/amcc/crypto4xx_alg.c +++ b/drivers/crypto/amcc/crypto4xx_alg.c @@ -520,8 +520,7 @@ static int crypto4xx_compute_gcm_hash_key_sw(__le32 *hash_start, const u8 *key, uint8_t src[16] = { 0 }; int rc = 0; - aes_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC | - CRYPTO_ALG_NEED_FALLBACK); + aes_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(aes_tfm)) { rc = PTR_ERR(aes_tfm); pr_warn("could not load aes cipher driver: %d\n", rc); diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 2d1f1db9f807..2ce3a16d3d10 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -4605,7 +4605,6 @@ static int spu_register_ablkcipher(struct iproc_alg_s *driver_alg) crypto->cra_priority = cipher_pri; crypto->cra_alignmask = 0; crypto->cra_ctxsize = sizeof(struct iproc_ctx_s); - INIT_LIST_HEAD(&crypto->cra_list); crypto->cra_init = ablkcipher_cra_init; crypto->cra_exit = generic_cra_exit; @@ -4652,12 +4651,16 @@ static int spu_register_ahash(struct iproc_alg_s *driver_alg) hash->halg.statesize = sizeof(struct spu_hash_export_s); if (driver_alg->auth_info.mode != HASH_MODE_HMAC) { - hash->setkey = ahash_setkey; hash->init = ahash_init; hash->update = ahash_update; hash->final = ahash_final; hash->finup = ahash_finup; hash->digest = ahash_digest; + if ((driver_alg->auth_info.alg == HASH_ALG_AES) && + ((driver_alg->auth_info.mode == HASH_MODE_XCBC) || + (driver_alg->auth_info.mode == HASH_MODE_CMAC))) { + hash->setkey = ahash_setkey; + } } else { hash->setkey = ahash_hmac_setkey; hash->init = ahash_hmac_init; @@ -4687,7 +4690,6 @@ static int spu_register_aead(struct iproc_alg_s *driver_alg) aead->base.cra_priority = aead_pri; aead->base.cra_alignmask = 0; aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s); - INIT_LIST_HEAD(&aead->base.cra_list); aead->base.cra_flags |= CRYPTO_ALG_ASYNC; /* setkey set in alg initialization */ diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 869f092432de..92e593e2069a 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -72,6 +72,8 @@ #define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \ CAAM_CMD_SZ * 5) +#define CHACHAPOLY_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6) + #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) @@ -513,6 +515,61 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc, return 0; } +static int chachapoly_set_sh_desc(struct crypto_aead *aead) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + unsigned int ivsize = crypto_aead_ivsize(aead); + u32 *desc; + + if (!ctx->cdata.keylen || !ctx->authsize) + return 0; + + desc = 
ctx->sh_desc_enc; + cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize, + ctx->authsize, true, false); + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, + desc_bytes(desc), ctx->dir); + + desc = ctx->sh_desc_dec; + cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize, + ctx->authsize, false, false); + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, + desc_bytes(desc), ctx->dir); + + return 0; +} + +static int chachapoly_setauthsize(struct crypto_aead *aead, + unsigned int authsize) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + + if (authsize != POLY1305_DIGEST_SIZE) + return -EINVAL; + + ctx->authsize = authsize; + return chachapoly_set_sh_desc(aead); +} + +static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + unsigned int ivsize = crypto_aead_ivsize(aead); + unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize; + + if (keylen != CHACHA_KEY_SIZE + saltlen) { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + ctx->cdata.key_virt = key; + ctx->cdata.keylen = keylen - saltlen; + + return chachapoly_set_sh_desc(aead); +} + static int aead_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { @@ -1031,6 +1088,40 @@ static void init_gcm_job(struct aead_request *req, /* End of blank commands */ } +static void init_chachapoly_job(struct aead_request *req, + struct aead_edesc *edesc, bool all_contig, + bool encrypt) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + unsigned int ivsize = crypto_aead_ivsize(aead); + unsigned int assoclen = req->assoclen; + u32 *desc = edesc->hw_desc; + u32 ctx_iv_off = 4; + + init_aead_job(req, edesc, all_contig, encrypt); + + if (ivsize != CHACHAPOLY_IV_SIZE) { + /* IPsec specific: CONTEXT1[223:128] = {NONCE, IV} */ + ctx_iv_off += 4; + + /* + * The associated data comes already with the IV but we need + * to skip it when we authenticate or encrypt... + */ + assoclen -= ivsize; + } + + append_math_add_imm_u32(desc, REG3, ZERO, IMM, assoclen); + + /* + * For IPsec load the IV further in the same register. 
+ * For RFC7539 simply load the 12 bytes nonce in a single operation + */ + append_load_as_imm(desc, req->iv, ivsize, LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + ctx_iv_off << LDST_OFFSET_SHIFT); +} + static void init_authenc_job(struct aead_request *req, struct aead_edesc *edesc, bool all_contig, bool encrypt) @@ -1289,6 +1380,72 @@ static int gcm_encrypt(struct aead_request *req) return ret; } +static int chachapoly_encrypt(struct aead_request *req) +{ + struct aead_edesc *edesc; + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + bool all_contig; + u32 *desc; + int ret; + + edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig, + true); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + desc = edesc->hw_desc; + + init_chachapoly_job(req, edesc, all_contig, true); + print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + + ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); + if (!ret) { + ret = -EINPROGRESS; + } else { + aead_unmap(jrdev, edesc, req); + kfree(edesc); + } + + return ret; +} + +static int chachapoly_decrypt(struct aead_request *req) +{ + struct aead_edesc *edesc; + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + bool all_contig; + u32 *desc; + int ret; + + edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig, + false); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + desc = edesc->hw_desc; + + init_chachapoly_job(req, edesc, all_contig, false); + print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + + ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); + if (!ret) { + ret = -EINPROGRESS; + } else { + aead_unmap(jrdev, edesc, req); + kfree(edesc); + } + + return ret; +} + static int ipsec_gcm_encrypt(struct aead_request *req) { if (req->assoclen < 8) @@ -3002,6 +3159,50 @@ static struct caam_aead_alg driver_aeads[] = { .geniv = true, }, }, + { + .aead = { + .base = { + .cra_name = "rfc7539(chacha20,poly1305)", + .cra_driver_name = "rfc7539-chacha20-poly1305-" + "caam", + .cra_blocksize = 1, + }, + .setkey = chachapoly_setkey, + .setauthsize = chachapoly_setauthsize, + .encrypt = chachapoly_encrypt, + .decrypt = chachapoly_decrypt, + .ivsize = CHACHAPOLY_IV_SIZE, + .maxauthsize = POLY1305_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 | + OP_ALG_AAI_AEAD, + .class2_alg_type = OP_ALG_ALGSEL_POLY1305 | + OP_ALG_AAI_AEAD, + }, + }, + { + .aead = { + .base = { + .cra_name = "rfc7539esp(chacha20,poly1305)", + .cra_driver_name = "rfc7539esp-chacha20-" + "poly1305-caam", + .cra_blocksize = 1, + }, + .setkey = chachapoly_setkey, + .setauthsize = chachapoly_setauthsize, + .encrypt = chachapoly_encrypt, + .decrypt = chachapoly_decrypt, + .ivsize = 8, + .maxauthsize = POLY1305_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 | + OP_ALG_AAI_AEAD, + .class2_alg_type = OP_ALG_ALGSEL_POLY1305 | + OP_ALG_AAI_AEAD, + }, + }, }; static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, @@ -3135,7 +3336,7 @@ static int __init caam_algapi_init(void) struct device *ctrldev; struct caam_drv_private *priv; int i = 0, err = 0; - u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst; + u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, 
ptha_inst; unsigned int md_limit = SHA512_DIGEST_SIZE; bool registered = false; @@ -3168,14 +3369,38 @@ static int __init caam_algapi_init(void) * Register crypto algorithms the device supports. * First, detect presence and attributes of DES, AES, and MD blocks. */ - cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); - cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); - des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT; - aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT; - md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; + if (priv->era < 10) { + u32 cha_vid, cha_inst; + + cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); + aes_vid = cha_vid & CHA_ID_LS_AES_MASK; + md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; + + cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); + des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> + CHA_ID_LS_DES_SHIFT; + aes_inst = cha_inst & CHA_ID_LS_AES_MASK; + md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; + ccha_inst = 0; + ptha_inst = 0; + } else { + u32 aesa, mdha; + + aesa = rd_reg32(&priv->ctrl->vreg.aesa); + mdha = rd_reg32(&priv->ctrl->vreg.mdha); + + aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; + md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; + + des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK; + aes_inst = aesa & CHA_VER_NUM_MASK; + md_inst = mdha & CHA_VER_NUM_MASK; + ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK; + ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK; + } /* If MD is present, limit digest size based on LP256 */ - if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)) + if (md_inst && md_vid == CHA_VER_VID_MD_LP256) md_limit = SHA256_DIGEST_SIZE; for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { @@ -3196,10 +3421,10 @@ static int __init caam_algapi_init(void) * Check support for AES modes not available * on LP devices. */ - if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) - if ((t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) == - OP_ALG_AAI_XTS) - continue; + if (aes_vid == CHA_VER_VID_AES_LP && + (t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) == + OP_ALG_AAI_XTS) + continue; caam_skcipher_alg_init(t_alg); @@ -3232,21 +3457,28 @@ static int __init caam_algapi_init(void) if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES)) continue; + /* Skip CHACHA20 algorithms if not supported by device */ + if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 && !ccha_inst) + continue; + + /* Skip POLY1305 algorithms if not supported by device */ + if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst) + continue; + /* * Check support for AES algorithms not available * on LP devices. */ - if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) - if (alg_aai == OP_ALG_AAI_GCM) - continue; + if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM) + continue; /* * Skip algorithms requiring message digests * if MD or MD size is not supported by device. 
*/ - if (c2_alg_sel && - (!md_inst || (t_alg->aead.maxauthsize > md_limit))) - continue; + if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 && + (!md_inst || t_alg->aead.maxauthsize > md_limit)) + continue; caam_aead_alg_init(t_alg); diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c index 1a6f0da14106..7db1640d3577 100644 --- a/drivers/crypto/caam/caamalg_desc.c +++ b/drivers/crypto/caam/caamalg_desc.c @@ -1213,6 +1213,139 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata, } EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap); +/** + * cnstr_shdsc_chachapoly - Chacha20 + Poly1305 generic AEAD (rfc7539) and + * IPsec ESP (rfc7634, a.k.a. rfc7539esp) shared + * descriptor (non-protocol). + * @desc: pointer to buffer used for descriptor construction + * @cdata: pointer to block cipher transform definitions + * Valid algorithm values - OP_ALG_ALGSEL_CHACHA20 ANDed with + * OP_ALG_AAI_AEAD. + * @adata: pointer to authentication transform definitions + * Valid algorithm values - OP_ALG_ALGSEL_POLY1305 ANDed with + * OP_ALG_AAI_AEAD. + * @ivsize: initialization vector size + * @icvsize: integrity check value (ICV) size (truncated or full) + * @encap: true if encapsulation, false if decapsulation + * @is_qi: true when called from caam/qi + */ +void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata, + struct alginfo *adata, unsigned int ivsize, + unsigned int icvsize, const bool encap, + const bool is_qi) +{ + u32 *key_jump_cmd, *wait_cmd; + u32 nfifo; + const bool is_ipsec = (ivsize != CHACHAPOLY_IV_SIZE); + + /* Note: Context registers are saved. */ + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); + + /* skip key loading if they are loaded due to sharing */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + + append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen, + CLASS_1 | KEY_DEST_CLASS_REG); + + /* For IPsec load the salt from keymat in the context register */ + if (is_ipsec) + append_load_as_imm(desc, cdata->key_virt + cdata->keylen, 4, + LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT | + 4 << LDST_OFFSET_SHIFT); + + set_jump_tgt_here(desc, key_jump_cmd); + + /* Class 2 and 1 operations: Poly & ChaCha */ + if (encap) { + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | + OP_ALG_ENCRYPT); + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | + OP_ALG_ENCRYPT); + } else { + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | + OP_ALG_DECRYPT | OP_ALG_ICV_ON); + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | + OP_ALG_DECRYPT); + } + + if (is_qi) { + u32 *wait_load_cmd; + u32 ctx1_iv_off = is_ipsec ? 8 : 4; + + /* REG3 = assoclen */ + append_seq_load(desc, 4, LDST_CLASS_DECO | + LDST_SRCDST_WORD_DECO_MATH3 | + 4 << LDST_OFFSET_SHIFT); + + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_CALM | JUMP_COND_NCP | + JUMP_COND_NOP | JUMP_COND_NIP | + JUMP_COND_NIFP); + set_jump_tgt_here(desc, wait_load_cmd); + + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + ctx1_iv_off << LDST_OFFSET_SHIFT); + } + + /* + * MAGIC with NFIFO + * Read associated data from the input and send them to class1 and + * class2 alignment blocks. From class1 send data to output fifo and + * then write it to memory since we don't need to encrypt AD. 
+ */ + nfifo = NFIFOENTRY_DEST_BOTH | NFIFOENTRY_FC1 | NFIFOENTRY_FC2 | + NFIFOENTRY_DTYPE_POLY | NFIFOENTRY_BND; + append_load_imm_u32(desc, nfifo, LDST_CLASS_IND_CCB | + LDST_SRCDST_WORD_INFO_FIFO_SM | LDLEN_MATH3); + + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + append_seq_fifo_load(desc, 0, FIFOLD_TYPE_NOINFOFIFO | + FIFOLD_CLASS_CLASS1 | LDST_VLF); + append_move_len(desc, MOVE_AUX_LS | MOVE_SRC_AUX_ABLK | + MOVE_DEST_OUTFIFO | MOVELEN_MRSEL_MATH3); + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF); + + /* IPsec - copy IV at the output */ + if (is_ipsec) + append_seq_fifo_store(desc, ivsize, FIFOST_TYPE_METADATA | + 0x2 << 25); + + wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TYPE_LOCAL | + JUMP_COND_NOP | JUMP_TEST_ALL); + set_jump_tgt_here(desc, wait_cmd); + + if (encap) { + /* Read and write cryptlen bytes */ + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, + CAAM_CMD_SZ); + aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); + + /* Write ICV */ + append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | + LDST_SRCDST_BYTE_CONTEXT); + } else { + /* Read and write cryptlen bytes */ + append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, + CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, + CAAM_CMD_SZ); + aead_append_src_dst(desc, FIFOLD_TYPE_MSG); + + /* Load ICV for verification */ + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 | + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); + } + + print_hex_dump_debug("chachapoly shdesc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); +} +EXPORT_SYMBOL(cnstr_shdsc_chachapoly); + /* For skcipher encrypt and decrypt, read from req->src and write to req->dst */ static inline void skcipher_append_src_dst(u32 *desc) { @@ -1228,7 +1361,8 @@ static inline void skcipher_append_src_dst(u32 *desc) * @desc: pointer to buffer used for descriptor construction * @cdata: pointer to block cipher transform definitions * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed - * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128 + * - OP_ALG_ALGSEL_CHACHA20 * @ivsize: initialization vector size * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template * @ctx1_iv_off: IV offset in CONTEXT1 register @@ -1293,7 +1427,8 @@ EXPORT_SYMBOL(cnstr_shdsc_skcipher_encap); * @desc: pointer to buffer used for descriptor construction * @cdata: pointer to block cipher transform definitions * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed - * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. 
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128 + * - OP_ALG_ALGSEL_CHACHA20 * @ivsize: initialization vector size * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template * @ctx1_iv_off: IV offset in CONTEXT1 register diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h index 1315c8f6f951..d5ca42ff961a 100644 --- a/drivers/crypto/caam/caamalg_desc.h +++ b/drivers/crypto/caam/caamalg_desc.h @@ -96,6 +96,11 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata, unsigned int ivsize, unsigned int icvsize, const bool is_qi); +void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata, + struct alginfo *adata, unsigned int ivsize, + unsigned int icvsize, const bool encap, + const bool is_qi); + void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata, unsigned int ivsize, const bool is_rfc3686, const u32 ctx1_iv_off); diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 23c9fc4975f8..c0d55310aade 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -2462,7 +2462,7 @@ static int __init caam_qi_algapi_init(void) struct device *ctrldev; struct caam_drv_private *priv; int i = 0, err = 0; - u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst; + u32 aes_vid, aes_inst, des_inst, md_vid, md_inst; unsigned int md_limit = SHA512_DIGEST_SIZE; bool registered = false; @@ -2497,14 +2497,34 @@ static int __init caam_qi_algapi_init(void) * Register crypto algorithms the device supports. * First, detect presence and attributes of DES, AES, and MD blocks. */ - cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); - cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); - des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT; - aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT; - md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; + if (priv->era < 10) { + u32 cha_vid, cha_inst; + + cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); + aes_vid = cha_vid & CHA_ID_LS_AES_MASK; + md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; + + cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); + des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> + CHA_ID_LS_DES_SHIFT; + aes_inst = cha_inst & CHA_ID_LS_AES_MASK; + md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; + } else { + u32 aesa, mdha; + + aesa = rd_reg32(&priv->ctrl->vreg.aesa); + mdha = rd_reg32(&priv->ctrl->vreg.mdha); + + aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; + md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; + + des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK; + aes_inst = aesa & CHA_VER_NUM_MASK; + md_inst = mdha & CHA_VER_NUM_MASK; + } /* If MD is present, limit digest size based on LP256 */ - if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)) + if (md_inst && md_vid == CHA_VER_VID_MD_LP256) md_limit = SHA256_DIGEST_SIZE; for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { @@ -2556,8 +2576,7 @@ static int __init caam_qi_algapi_init(void) * Check support for AES algorithms not available * on LP devices. 
*/ - if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) && - (alg_aai == OP_ALG_AAI_GCM)) + if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM) continue; /* diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 7d8ac0222fa3..425d5d974613 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -462,7 +462,15 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, edesc->dst_nents = dst_nents; edesc->iv_dma = iv_dma; - edesc->assoclen = cpu_to_caam32(req->assoclen); + if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) == + OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE) + /* + * The associated data comes already with the IV but we need + * to skip it when we authenticate or encrypt... + */ + edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize); + else + edesc->assoclen = cpu_to_caam32(req->assoclen); edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4, DMA_TO_DEVICE); if (dma_mapping_error(dev, edesc->assoclen_dma)) { @@ -532,6 +540,68 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, return edesc; } +static int chachapoly_set_sh_desc(struct crypto_aead *aead) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + unsigned int ivsize = crypto_aead_ivsize(aead); + struct device *dev = ctx->dev; + struct caam_flc *flc; + u32 *desc; + + if (!ctx->cdata.keylen || !ctx->authsize) + return 0; + + flc = &ctx->flc[ENCRYPT]; + desc = flc->sh_desc; + cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize, + ctx->authsize, true, true); + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], + sizeof(flc->flc) + desc_bytes(desc), + ctx->dir); + + flc = &ctx->flc[DECRYPT]; + desc = flc->sh_desc; + cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize, + ctx->authsize, false, true); + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], + sizeof(flc->flc) + desc_bytes(desc), + ctx->dir); + + return 0; +} + +static int chachapoly_setauthsize(struct crypto_aead *aead, + unsigned int authsize) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + + if (authsize != POLY1305_DIGEST_SIZE) + return -EINVAL; + + ctx->authsize = authsize; + return chachapoly_set_sh_desc(aead); +} + +static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + unsigned int ivsize = crypto_aead_ivsize(aead); + unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize; + + if (keylen != CHACHA_KEY_SIZE + saltlen) { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + ctx->cdata.key_virt = key; + ctx->cdata.keylen = keylen - saltlen; + + return chachapoly_set_sh_desc(aead); +} + static int gcm_set_sh_desc(struct crypto_aead *aead) { struct caam_ctx *ctx = crypto_aead_ctx(aead); @@ -816,7 +886,9 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, u32 *desc; u32 ctx1_iv_off = 0; const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == - OP_ALG_AAI_CTR_MOD128); + OP_ALG_AAI_CTR_MOD128) && + ((ctx->cdata.algtype & OP_ALG_ALGSEL_MASK) != + OP_ALG_ALGSEL_CHACHA20); const bool is_rfc3686 = alg->caam.rfc3686; print_hex_dump_debug("key in @" __stringify(__LINE__)": ", @@ -1494,7 +1566,23 @@ static struct caam_skcipher_alg driver_algs[] = { .ivsize = AES_BLOCK_SIZE, }, .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS, - 
} + }, + { + .skcipher = { + .base = { + .cra_name = "chacha20", + .cra_driver_name = "chacha20-caam-qi2", + .cra_blocksize = 1, + }, + .setkey = skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = CHACHA_KEY_SIZE, + .max_keysize = CHACHA_KEY_SIZE, + .ivsize = CHACHA_IV_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20, + }, }; static struct caam_aead_alg driver_aeads[] = { @@ -2611,6 +2699,50 @@ static struct caam_aead_alg driver_aeads[] = { { .aead = { .base = { + .cra_name = "rfc7539(chacha20,poly1305)", + .cra_driver_name = "rfc7539-chacha20-poly1305-" + "caam-qi2", + .cra_blocksize = 1, + }, + .setkey = chachapoly_setkey, + .setauthsize = chachapoly_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CHACHAPOLY_IV_SIZE, + .maxauthsize = POLY1305_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 | + OP_ALG_AAI_AEAD, + .class2_alg_type = OP_ALG_ALGSEL_POLY1305 | + OP_ALG_AAI_AEAD, + }, + }, + { + .aead = { + .base = { + .cra_name = "rfc7539esp(chacha20,poly1305)", + .cra_driver_name = "rfc7539esp-chacha20-" + "poly1305-caam-qi2", + .cra_blocksize = 1, + }, + .setkey = chachapoly_setkey, + .setauthsize = chachapoly_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = 8, + .maxauthsize = POLY1305_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 | + OP_ALG_AAI_AEAD, + .class2_alg_type = OP_ALG_ALGSEL_POLY1305 | + OP_ALG_AAI_AEAD, + }, + }, + { + .aead = { + .base = { .cra_name = "authenc(hmac(sha512)," "rfc3686(ctr(aes)))", .cra_driver_name = "authenc-hmac-sha512-" @@ -4908,6 +5040,11 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev) alg_sel == OP_ALG_ALGSEL_AES) continue; + /* Skip CHACHA20 algorithms if not supported by device */ + if (alg_sel == OP_ALG_ALGSEL_CHACHA20 && + !priv->sec_attr.ccha_acc_num) + continue; + t_alg->caam.dev = dev; caam_skcipher_alg_init(t_alg); @@ -4940,11 +5077,22 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev) c1_alg_sel == OP_ALG_ALGSEL_AES) continue; + /* Skip CHACHA20 algorithms if not supported by device */ + if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 && + !priv->sec_attr.ccha_acc_num) + continue; + + /* Skip POLY1305 algorithms if not supported by device */ + if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && + !priv->sec_attr.ptha_acc_num) + continue; + /* * Skip algorithms requiring message digests * if MD not supported by device. */ - if (!priv->sec_attr.md_acc_num && c2_alg_sel) + if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 && + !priv->sec_attr.md_acc_num) continue; t_alg->caam.dev = dev; diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 46924affa0bd..81712aa5d0f2 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -3,6 +3,7 @@ * caam - Freescale FSL CAAM support for ahash functions of crypto API * * Copyright 2011 Freescale Semiconductor, Inc. + * Copyright 2018 NXP * * Based on caamalg.c crypto API driver. * @@ -1801,7 +1802,7 @@ static int __init caam_algapi_hash_init(void) int i = 0, err = 0; struct caam_drv_private *priv; unsigned int md_limit = SHA512_DIGEST_SIZE; - u32 cha_inst, cha_vid; + u32 md_inst, md_vid; dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); if (!dev_node) { @@ -1831,18 +1832,27 @@ static int __init caam_algapi_hash_init(void) * Register crypto algorithms the device supports. First, identify * presence and attributes of MD block. 
*/ - cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); - cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); + if (priv->era < 10) { + md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) & + CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; + md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & + CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; + } else { + u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha); + + md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; + md_inst = mdha & CHA_VER_NUM_MASK; + } /* * Skip registration of any hashing algorithms if MD block * is not present. */ - if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT)) + if (!md_inst) return -ENODEV; /* Limit digest size based on LP256 */ - if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256) + if (md_vid == CHA_VER_VID_MD_LP256) md_limit = SHA256_DIGEST_SIZE; INIT_LIST_HEAD(&hash_list); diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 4fc209cbbeab..77ab28a2811a 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -3,6 +3,7 @@ * caam - Freescale FSL CAAM support for Public Key Cryptography * * Copyright 2016 Freescale Semiconductor, Inc. + * Copyright 2018 NXP * * There is no Shared Descriptor for PKC so that the Job Descriptor must carry * all the desired key parameters, input and output pointers. @@ -1017,7 +1018,7 @@ static int __init caam_pkc_init(void) struct platform_device *pdev; struct device *ctrldev; struct caam_drv_private *priv; - u32 cha_inst, pk_inst; + u32 pk_inst; int err; dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); @@ -1045,8 +1046,11 @@ static int __init caam_pkc_init(void) return -ENODEV; /* Determine public key hardware accelerator presence. */ - cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); - pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT; + if (priv->era < 10) + pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & + CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT; + else + pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK; /* Do not register algorithms if PKHA is not present. */ if (!pk_inst) diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c index 4318b0aa6fb9..a387c8d49a62 100644 --- a/drivers/crypto/caam/caamrng.c +++ b/drivers/crypto/caam/caamrng.c @@ -3,6 +3,7 @@ * caam - Freescale FSL CAAM support for hw_random * * Copyright 2011 Freescale Semiconductor, Inc. + * Copyright 2018 NXP * * Based on caamalg.c crypto API driver. 
* @@ -309,6 +310,7 @@ static int __init caam_rng_init(void) struct platform_device *pdev; struct device *ctrldev; struct caam_drv_private *priv; + u32 rng_inst; int err; dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); @@ -336,7 +338,13 @@ static int __init caam_rng_init(void) return -ENODEV; /* Check for an instantiated RNG before registration */ - if (!(rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & CHA_ID_LS_RNG_MASK)) + if (priv->era < 10) + rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & + CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT; + else + rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK; + + if (!rng_inst) return -ENODEV; dev = caam_jr_alloc(); diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h index 9604ff7a335e..87d9efe4c7aa 100644 --- a/drivers/crypto/caam/compat.h +++ b/drivers/crypto/caam/compat.h @@ -36,6 +36,8 @@ #include <crypto/gcm.h> #include <crypto/sha.h> #include <crypto/md5.h> +#include <crypto/chacha.h> +#include <crypto/poly1305.h> #include <crypto/internal/aead.h> #include <crypto/authenc.h> #include <crypto/akcipher.h> diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 3fc793193821..16bbc72f041a 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -3,6 +3,7 @@ * Controller-level driver, kernel property detection, initialization * * Copyright 2008-2012 Freescale Semiconductor, Inc. + * Copyright 2018 NXP */ #include <linux/device.h> @@ -106,7 +107,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc, struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl; struct caam_deco __iomem *deco = ctrlpriv->deco; unsigned int timeout = 100000; - u32 deco_dbg_reg, flags; + u32 deco_dbg_reg, deco_state, flags; int i; @@ -149,13 +150,22 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc, timeout = 10000000; do { deco_dbg_reg = rd_reg32(&deco->desc_dbg); + + if (ctrlpriv->era < 10) + deco_state = (deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) >> + DESC_DBG_DECO_STAT_SHIFT; + else + deco_state = (rd_reg32(&deco->dbg_exec) & + DESC_DER_DECO_STAT_MASK) >> + DESC_DER_DECO_STAT_SHIFT; + /* * If an error occured in the descriptor, then * the DECO status field will be set to 0x0D */ - if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) == - DESC_DBG_DECO_STAT_HOST_ERR) + if (deco_state == DECO_STAT_HOST_ERR) break; + cpu_relax(); } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout); @@ -491,7 +501,7 @@ static int caam_probe(struct platform_device *pdev) struct caam_perfmon *perfmon; #endif u32 scfgr, comp_params; - u32 cha_vid_ls; + u8 rng_vid; int pg_size; int BLOCK_OFFSET = 0; @@ -733,15 +743,19 @@ static int caam_probe(struct platform_device *pdev) goto caam_remove; } - cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls); + if (ctrlpriv->era < 10) + rng_vid = (rd_reg32(&ctrl->perfmon.cha_id_ls) & + CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT; + else + rng_vid = (rd_reg32(&ctrl->vreg.rng) & CHA_VER_VID_MASK) >> + CHA_VER_VID_SHIFT; /* * If SEC has RNG version >= 4 and RNG state handle has not been * already instantiated, do RNG instantiation * In case of SoCs with Management Complex, RNG is managed by MC f/w. 
*/ - if (!ctrlpriv->mc_en && - (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) { + if (!ctrlpriv->mc_en && rng_vid >= 4) { ctrlpriv->rng4_sh_init = rd_reg32(&ctrl->r4tst[0].rdsta); /* diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h index f76ff160a02c..ec10230178c5 100644 --- a/drivers/crypto/caam/desc.h +++ b/drivers/crypto/caam/desc.h @@ -4,6 +4,7 @@ * Definitions to support CAAM descriptor instruction generation * * Copyright 2008-2011 Freescale Semiconductor, Inc. + * Copyright 2018 NXP */ #ifndef DESC_H @@ -242,6 +243,7 @@ #define LDST_SRCDST_WORD_DESCBUF_SHARED (0x42 << LDST_SRCDST_SHIFT) #define LDST_SRCDST_WORD_DESCBUF_JOB_WE (0x45 << LDST_SRCDST_SHIFT) #define LDST_SRCDST_WORD_DESCBUF_SHARED_WE (0x46 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_INFO_FIFO_SM (0x71 << LDST_SRCDST_SHIFT) #define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT) /* Offset in source/destination */ @@ -284,6 +286,12 @@ #define LDLEN_SET_OFIFO_OFFSET_SHIFT 0 #define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT) +/* Special Length definitions when dst=sm, nfifo-{sm,m} */ +#define LDLEN_MATH0 0 +#define LDLEN_MATH1 1 +#define LDLEN_MATH2 2 +#define LDLEN_MATH3 3 + /* * FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE * Command Constructs @@ -408,6 +416,7 @@ #define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT) #define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT) #define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_METADATA (0x3e << FIFOST_TYPE_SHIFT) #define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT) /* @@ -1133,6 +1142,12 @@ #define OP_ALG_TYPE_CLASS1 (2 << OP_ALG_TYPE_SHIFT) #define OP_ALG_TYPE_CLASS2 (4 << OP_ALG_TYPE_SHIFT) +/* version register fields */ +#define OP_VER_CCHA_NUM 0x000000ff /* Number CCHAs instantiated */ +#define OP_VER_CCHA_MISC 0x0000ff00 /* CCHA Miscellaneous Information */ +#define OP_VER_CCHA_REV 0x00ff0000 /* CCHA Revision Number */ +#define OP_VER_CCHA_VID 0xff000000 /* CCHA Version ID */ + #define OP_ALG_ALGSEL_SHIFT 16 #define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT) #define OP_ALG_ALGSEL_SUBMASK (0x0f << OP_ALG_ALGSEL_SHIFT) @@ -1152,6 +1167,8 @@ #define OP_ALG_ALGSEL_KASUMI (0x70 << OP_ALG_ALGSEL_SHIFT) #define OP_ALG_ALGSEL_CRC (0x90 << OP_ALG_ALGSEL_SHIFT) #define OP_ALG_ALGSEL_SNOW_F9 (0xA0 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_CHACHA20 (0xD0 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_POLY1305 (0xE0 << OP_ALG_ALGSEL_SHIFT) #define OP_ALG_AAI_SHIFT 4 #define OP_ALG_AAI_MASK (0x1ff << OP_ALG_AAI_SHIFT) @@ -1199,6 +1216,11 @@ #define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT) #define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT) +/* Chacha20 AAI set */ +#define OP_ALG_AAI_AEAD (0x002 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_KEYSTREAM (0x001 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_BC8 (0x008 << OP_ALG_AAI_SHIFT) + /* hmac/smac AAI set */ #define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT) #define OP_ALG_AAI_HMAC (0x01 << OP_ALG_AAI_SHIFT) @@ -1387,6 +1409,7 @@ #define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT) #define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT) #define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT) +#define MOVE_SRC_AUX_ABLK (0x0a << MOVE_SRC_SHIFT) #define MOVE_DEST_SHIFT 16 #define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT) @@ -1413,6 +1436,10 @@ #define MOVELEN_MRSEL_SHIFT 0 #define MOVELEN_MRSEL_MASK (0x3 << MOVE_LEN_SHIFT) +#define MOVELEN_MRSEL_MATH0 (0 << MOVELEN_MRSEL_SHIFT) +#define MOVELEN_MRSEL_MATH1 (1 << MOVELEN_MRSEL_SHIFT) +#define 
MOVELEN_MRSEL_MATH2 (2 << MOVELEN_MRSEL_SHIFT) +#define MOVELEN_MRSEL_MATH3 (3 << MOVELEN_MRSEL_SHIFT) /* * MATH Command Constructs @@ -1589,6 +1616,7 @@ #define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT) #define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT) #define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_POLY (0xB << NFIFOENTRY_DTYPE_SHIFT) #define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT) #define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT) diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h index d4256fa4a1d6..2980b8ef1fb1 100644 --- a/drivers/crypto/caam/desc_constr.h +++ b/drivers/crypto/caam/desc_constr.h @@ -189,6 +189,7 @@ static inline u32 *append_##cmd(u32 * const desc, u32 options) \ } APPEND_CMD_RET(jump, JUMP) APPEND_CMD_RET(move, MOVE) +APPEND_CMD_RET(move_len, MOVE_LEN) static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd) { @@ -327,7 +328,11 @@ static inline void append_##cmd##_imm_##type(u32 * const desc, type immediate, \ u32 options) \ { \ PRINT_POS; \ - append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(type)); \ + if (options & LDST_LEN_MASK) \ + append_cmd(desc, CMD_##op | IMMEDIATE | options); \ + else \ + append_cmd(desc, CMD_##op | IMMEDIATE | options | \ + sizeof(type)); \ append_cmd(desc, immediate); \ } APPEND_CMD_RAW_IMM(load, LOAD, u32); diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 457815f965c0..3cd0822ea819 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -3,6 +3,7 @@ * CAAM hardware register-level view * * Copyright 2008-2011 Freescale Semiconductor, Inc. + * Copyright 2018 NXP */ #ifndef REGS_H @@ -211,6 +212,47 @@ struct jr_outentry { u32 jrstatus; /* Status for completed descriptor */ } __packed; +/* Version registers (Era 10+) e80-eff */ +struct version_regs { + u32 crca; /* CRCA_VERSION */ + u32 afha; /* AFHA_VERSION */ + u32 kfha; /* KFHA_VERSION */ + u32 pkha; /* PKHA_VERSION */ + u32 aesa; /* AESA_VERSION */ + u32 mdha; /* MDHA_VERSION */ + u32 desa; /* DESA_VERSION */ + u32 snw8a; /* SNW8A_VERSION */ + u32 snw9a; /* SNW9A_VERSION */ + u32 zuce; /* ZUCE_VERSION */ + u32 zuca; /* ZUCA_VERSION */ + u32 ccha; /* CCHA_VERSION */ + u32 ptha; /* PTHA_VERSION */ + u32 rng; /* RNG_VERSION */ + u32 trng; /* TRNG_VERSION */ + u32 aaha; /* AAHA_VERSION */ + u32 rsvd[10]; + u32 sr; /* SR_VERSION */ + u32 dma; /* DMA_VERSION */ + u32 ai; /* AI_VERSION */ + u32 qi; /* QI_VERSION */ + u32 jr; /* JR_VERSION */ + u32 deco; /* DECO_VERSION */ +}; + +/* Version registers bitfields */ + +/* Number of CHAs instantiated */ +#define CHA_VER_NUM_MASK 0xffull +/* CHA Miscellaneous Information */ +#define CHA_VER_MISC_SHIFT 8 +#define CHA_VER_MISC_MASK (0xffull << CHA_VER_MISC_SHIFT) +/* CHA Revision Number */ +#define CHA_VER_REV_SHIFT 16 +#define CHA_VER_REV_MASK (0xffull << CHA_VER_REV_SHIFT) +/* CHA Version ID */ +#define CHA_VER_VID_SHIFT 24 +#define CHA_VER_VID_MASK (0xffull << CHA_VER_VID_SHIFT) + /* * caam_perfmon - Performance Monitor/Secure Memory Status/ * CAAM Global Status/Component Version IDs @@ -223,15 +265,13 @@ struct jr_outentry { #define CHA_NUM_MS_DECONUM_MASK (0xfull << CHA_NUM_MS_DECONUM_SHIFT) /* - * CHA version IDs / instantiation bitfields + * CHA version IDs / instantiation bitfields (< Era 10) * Defined for use with the cha_id fields in perfmon, but the same shift/mask * selectors can be used to pull out the number of instantiated blocks within * cha_num fields in 
perfmon because the locations are the same. */ #define CHA_ID_LS_AES_SHIFT 0 #define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT) -#define CHA_ID_LS_AES_LP (0x3ull << CHA_ID_LS_AES_SHIFT) -#define CHA_ID_LS_AES_HP (0x4ull << CHA_ID_LS_AES_SHIFT) #define CHA_ID_LS_DES_SHIFT 4 #define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT) @@ -241,9 +281,6 @@ struct jr_outentry { #define CHA_ID_LS_MD_SHIFT 12 #define CHA_ID_LS_MD_MASK (0xfull << CHA_ID_LS_MD_SHIFT) -#define CHA_ID_LS_MD_LP256 (0x0ull << CHA_ID_LS_MD_SHIFT) -#define CHA_ID_LS_MD_LP512 (0x1ull << CHA_ID_LS_MD_SHIFT) -#define CHA_ID_LS_MD_HP (0x2ull << CHA_ID_LS_MD_SHIFT) #define CHA_ID_LS_RNG_SHIFT 16 #define CHA_ID_LS_RNG_MASK (0xfull << CHA_ID_LS_RNG_SHIFT) @@ -269,6 +306,13 @@ struct jr_outentry { #define CHA_ID_MS_JR_SHIFT 28 #define CHA_ID_MS_JR_MASK (0xfull << CHA_ID_MS_JR_SHIFT) +/* Specific CHA version IDs */ +#define CHA_VER_VID_AES_LP 0x3ull +#define CHA_VER_VID_AES_HP 0x4ull +#define CHA_VER_VID_MD_LP256 0x0ull +#define CHA_VER_VID_MD_LP512 0x1ull +#define CHA_VER_VID_MD_HP 0x2ull + struct sec_vid { u16 ip_id; u8 maj_rev; @@ -479,8 +523,10 @@ struct caam_ctrl { struct rng4tst r4tst[2]; }; - u32 rsvd9[448]; + u32 rsvd9[416]; + /* Version registers - introduced with era 10 e80-eff */ + struct version_regs vreg; /* Performance Monitor f00-fff */ struct caam_perfmon perfmon; }; @@ -570,8 +616,10 @@ struct caam_job_ring { u32 rsvd11; u32 jrcommand; /* JRCRx - JobR command */ - u32 rsvd12[932]; + u32 rsvd12[900]; + /* Version registers - introduced with era 10 e80-eff */ + struct version_regs vreg; /* Performance Monitor f00-fff */ struct caam_perfmon perfmon; }; @@ -878,13 +926,19 @@ struct caam_deco { u32 rsvd29[48]; u32 descbuf[64]; /* DxDESB - Descriptor buffer */ u32 rscvd30[193]; -#define DESC_DBG_DECO_STAT_HOST_ERR 0x00D00000 #define DESC_DBG_DECO_STAT_VALID 0x80000000 #define DESC_DBG_DECO_STAT_MASK 0x00F00000 +#define DESC_DBG_DECO_STAT_SHIFT 20 u32 desc_dbg; /* DxDDR - DECO Debug Register */ - u32 rsvd31[126]; + u32 rsvd31[13]; +#define DESC_DER_DECO_STAT_MASK 0x000F0000 +#define DESC_DER_DECO_STAT_SHIFT 16 + u32 dbg_exec; /* DxDER - DECO Debug Exec Register */ + u32 rsvd32[112]; }; +#define DECO_STAT_HOST_ERR 0xD + #define DECO_JQCR_WHL 0x20000000 #define DECO_JQCR_FOUR 0x10000000 diff --git a/drivers/crypto/cavium/nitrox/nitrox_algs.c b/drivers/crypto/cavium/nitrox/nitrox_algs.c index 2ae6124e5da6..10075a97ff0d 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_algs.c +++ b/drivers/crypto/cavium/nitrox/nitrox_algs.c @@ -73,7 +73,7 @@ static int flexi_aes_keylen(int keylen) static int nitrox_skcipher_init(struct crypto_skcipher *tfm) { struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm); - void *fctx; + struct crypto_ctx_hdr *chdr; /* get the first device */ nctx->ndev = nitrox_get_first_device(); @@ -81,12 +81,14 @@ static int nitrox_skcipher_init(struct crypto_skcipher *tfm) return -ENODEV; /* allocate nitrox crypto context */ - fctx = crypto_alloc_context(nctx->ndev); - if (!fctx) { + chdr = crypto_alloc_context(nctx->ndev); + if (!chdr) { nitrox_put_device(nctx->ndev); return -ENOMEM; } - nctx->u.ctx_handle = (uintptr_t)fctx; + nctx->chdr = chdr; + nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr + + sizeof(struct ctx_hdr)); crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(tfm) + sizeof(struct nitrox_kcrypt_request)); return 0; @@ -102,7 +104,7 @@ static void nitrox_skcipher_exit(struct crypto_skcipher *tfm) memset(&fctx->crypto, 0, sizeof(struct crypto_keys)); memset(&fctx->auth, 0, 
sizeof(struct auth_keys)); - crypto_free_context((void *)fctx); + crypto_free_context((void *)nctx->chdr); } nitrox_put_device(nctx->ndev); @@ -153,13 +155,109 @@ static int nitrox_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); } +static int alloc_src_sglist(struct skcipher_request *skreq, int ivsize) +{ + struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); + int nents = sg_nents(skreq->src) + 1; + struct se_crypto_request *creq = &nkreq->creq; + char *iv; + struct scatterlist *sg; + + /* Allocate buffer to hold IV and input scatterlist array */ + nkreq->src = alloc_req_buf(nents, ivsize, creq->gfp); + if (!nkreq->src) + return -ENOMEM; + + /* copy iv */ + iv = nkreq->src; + memcpy(iv, skreq->iv, ivsize); + + sg = (struct scatterlist *)(iv + ivsize); + creq->src = sg; + sg_init_table(sg, nents); + + /* Input format: + * +----+----------------+ + * | IV | SRC sg entries | + * +----+----------------+ + */ + + /* IV */ + sg = create_single_sg(sg, iv, ivsize); + /* SRC entries */ + create_multi_sg(sg, skreq->src); + + return 0; +} + +static int alloc_dst_sglist(struct skcipher_request *skreq, int ivsize) +{ + struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); + int nents = sg_nents(skreq->dst) + 3; + int extralen = ORH_HLEN + COMP_HLEN; + struct se_crypto_request *creq = &nkreq->creq; + struct scatterlist *sg; + char *iv = nkreq->src; + + /* Allocate buffer to hold ORH, COMPLETION and output scatterlist + * array + */ + nkreq->dst = alloc_req_buf(nents, extralen, creq->gfp); + if (!nkreq->dst) + return -ENOMEM; + + creq->orh = (u64 *)(nkreq->dst); + set_orh_value(creq->orh); + + creq->comp = (u64 *)(nkreq->dst + ORH_HLEN); + set_comp_value(creq->comp); + + sg = (struct scatterlist *)(nkreq->dst + ORH_HLEN + COMP_HLEN); + creq->dst = sg; + sg_init_table(sg, nents); + + /* Output format: + * +-----+----+----------------+-----------------+ + * | ORH | IV | DST sg entries | COMPLETION Bytes| + * +-----+----+----------------+-----------------+ + */ + + /* ORH */ + sg = create_single_sg(sg, creq->orh, ORH_HLEN); + /* IV */ + sg = create_single_sg(sg, iv, ivsize); + /* DST entries */ + sg = create_multi_sg(sg, skreq->dst); + /* COMPLETION Bytes */ + create_single_sg(sg, creq->comp, COMP_HLEN); + + return 0; +} + +static void free_src_sglist(struct skcipher_request *skreq) +{ + struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); + + kfree(nkreq->src); +} + +static void free_dst_sglist(struct skcipher_request *skreq) +{ + struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); + + kfree(nkreq->dst); +} + static void nitrox_skcipher_callback(struct skcipher_request *skreq, int err) { + free_src_sglist(skreq); + free_dst_sglist(skreq); if (err) { pr_err_ratelimited("request failed status 0x%0x\n", err); err = -EINVAL; } + skcipher_request_complete(skreq, err); } @@ -170,6 +268,7 @@ static int nitrox_skcipher_crypt(struct skcipher_request *skreq, bool enc) struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); int ivsize = crypto_skcipher_ivsize(cipher); struct se_crypto_request *creq; + int ret; creq = &nkreq->creq; creq->flags = skreq->base.flags; @@ -190,11 +289,15 @@ static int nitrox_skcipher_crypt(struct skcipher_request *skreq, bool enc) creq->ctx_handle = nctx->u.ctx_handle; creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context); - /* copy the iv */ - memcpy(creq->iv, skreq->iv, ivsize); - creq->ivsize = ivsize; - creq->src = skreq->src; - creq->dst = 
skreq->dst; + ret = alloc_src_sglist(skreq, ivsize); + if (ret) + return ret; + + ret = alloc_dst_sglist(skreq, ivsize); + if (ret) { + free_src_sglist(skreq); + return ret; + } nkreq->nctx = nctx; nkreq->skreq = skreq; diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h index 283e252385fb..247df32f687c 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_dev.h +++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h @@ -103,6 +103,16 @@ struct nitrox_q_vector { }; }; +/** + * struct nitrox_iov - SR-IOV information + * @num_vfs: number of VF(s) enabled + * @msix: MSI-X for PF in SR-IOV case + */ +struct nitrox_iov { + int num_vfs; + struct msix_entry msix; +}; + /* * NITROX Device states */ @@ -150,6 +160,9 @@ enum vf_mode { * @ctx_pool: DMA pool for crypto context * @pkt_inq: Packet input rings * @qvec: MSI-X queue vectors information + * @iov: SR-IOV informatin + * @num_vecs: number of MSI-X vectors + * @stats: request statistics * @hw: hardware information * @debugfs_dir: debugfs directory */ @@ -168,13 +181,13 @@ struct nitrox_device { int node; u16 qlen; u16 nr_queues; - int num_vfs; enum vf_mode mode; struct dma_pool *ctx_pool; struct nitrox_cmdq *pkt_inq; struct nitrox_q_vector *qvec; + struct nitrox_iov iov; int num_vecs; struct nitrox_stats stats; diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.c b/drivers/crypto/cavium/nitrox/nitrox_isr.c index 88a77b8fb3fb..c5b797808680 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_isr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_isr.c @@ -13,6 +13,7 @@ * - NPS packet ring, AQMQ ring and ZQMQ ring */ #define NR_RING_VECTORS 3 +#define NR_NON_RING_VECTORS 1 /* base entry for packet ring/port */ #define PKT_RING_MSIX_BASE 0 #define NON_RING_MSIX_BASE 192 @@ -275,6 +276,7 @@ void nitrox_unregister_interrupts(struct nitrox_device *ndev) qvec->valid = false; } kfree(ndev->qvec); + ndev->qvec = NULL; pci_free_irq_vectors(pdev); } @@ -321,6 +323,7 @@ int nitrox_register_interrupts(struct nitrox_device *ndev) if (qvec->ring >= ndev->nr_queues) break; + qvec->cmdq = &ndev->pkt_inq[qvec->ring]; snprintf(qvec->name, IRQ_NAMESZ, "nitrox-pkt%d", qvec->ring); /* get the vector number */ vec = pci_irq_vector(pdev, i); @@ -335,13 +338,13 @@ int nitrox_register_interrupts(struct nitrox_device *ndev) tasklet_init(&qvec->resp_tasklet, pkt_slc_resp_tasklet, (unsigned long)qvec); - qvec->cmdq = &ndev->pkt_inq[qvec->ring]; qvec->valid = true; } /* request irqs for non ring vectors */ i = NON_RING_MSIX_BASE; qvec = &ndev->qvec[i]; + qvec->ndev = ndev; snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d", i); /* get the vector number */ @@ -356,7 +359,6 @@ int nitrox_register_interrupts(struct nitrox_device *ndev) tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet, (unsigned long)qvec); - qvec->ndev = ndev; qvec->valid = true; return 0; @@ -365,3 +367,81 @@ irq_fail: nitrox_unregister_interrupts(ndev); return ret; } + +void nitrox_sriov_unregister_interrupts(struct nitrox_device *ndev) +{ + struct pci_dev *pdev = ndev->pdev; + int i; + + for (i = 0; i < ndev->num_vecs; i++) { + struct nitrox_q_vector *qvec; + int vec; + + qvec = ndev->qvec + i; + if (!qvec->valid) + continue; + + vec = ndev->iov.msix.vector; + irq_set_affinity_hint(vec, NULL); + free_irq(vec, qvec); + + tasklet_disable(&qvec->resp_tasklet); + tasklet_kill(&qvec->resp_tasklet); + qvec->valid = false; + } + kfree(ndev->qvec); + ndev->qvec = NULL; + pci_disable_msix(pdev); +} + +int nitrox_sriov_register_interupts(struct nitrox_device *ndev) +{ + struct pci_dev 
*pdev = ndev->pdev; + struct nitrox_q_vector *qvec; + int vec, cpu; + int ret; + + /** + * only non ring vectors i.e Entry 192 is available + * for PF in SR-IOV mode. + */ + ndev->iov.msix.entry = NON_RING_MSIX_BASE; + ret = pci_enable_msix_exact(pdev, &ndev->iov.msix, NR_NON_RING_VECTORS); + if (ret) { + dev_err(DEV(ndev), "failed to allocate nps-core-int%d\n", + NON_RING_MSIX_BASE); + return ret; + } + + qvec = kcalloc(NR_NON_RING_VECTORS, sizeof(*qvec), GFP_KERNEL); + if (!qvec) { + pci_disable_msix(pdev); + return -ENOMEM; + } + qvec->ndev = ndev; + + ndev->qvec = qvec; + ndev->num_vecs = NR_NON_RING_VECTORS; + snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d", + NON_RING_MSIX_BASE); + + vec = ndev->iov.msix.vector; + ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec); + if (ret) { + dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n", + NON_RING_MSIX_BASE); + goto iov_irq_fail; + } + cpu = num_online_cpus(); + irq_set_affinity_hint(vec, get_cpu_mask(cpu)); + + tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet, + (unsigned long)qvec); + qvec->valid = true; + + return 0; + +iov_irq_fail: + nitrox_sriov_unregister_interrupts(ndev); + return ret; +} diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.h b/drivers/crypto/cavium/nitrox/nitrox_isr.h index 63418a6cc52c..1062c9336c1f 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_isr.h +++ b/drivers/crypto/cavium/nitrox/nitrox_isr.h @@ -6,5 +6,7 @@ int nitrox_register_interrupts(struct nitrox_device *ndev); void nitrox_unregister_interrupts(struct nitrox_device *ndev); +int nitrox_sriov_register_interupts(struct nitrox_device *ndev); +void nitrox_sriov_unregister_interrupts(struct nitrox_device *ndev); #endif /* __NITROX_ISR_H */ diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c index 2260efa42308..9138bae12521 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_lib.c +++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c @@ -158,12 +158,19 @@ static void destroy_crypto_dma_pool(struct nitrox_device *ndev) void *crypto_alloc_context(struct nitrox_device *ndev) { struct ctx_hdr *ctx; + struct crypto_ctx_hdr *chdr; void *vaddr; dma_addr_t dma; + chdr = kmalloc(sizeof(*chdr), GFP_KERNEL); + if (!chdr) + return NULL; + vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma); - if (!vaddr) + if (!vaddr) { + kfree(chdr); return NULL; + } /* fill meta data */ ctx = vaddr; @@ -171,7 +178,11 @@ void *crypto_alloc_context(struct nitrox_device *ndev) ctx->dma = dma; ctx->ctx_dma = dma + sizeof(struct ctx_hdr); - return ((u8 *)vaddr + sizeof(struct ctx_hdr)); + chdr->pool = ndev->ctx_pool; + chdr->dma = dma; + chdr->vaddr = vaddr; + + return chdr; } /** @@ -180,13 +191,14 @@ void *crypto_alloc_context(struct nitrox_device *ndev) */ void crypto_free_context(void *ctx) { - struct ctx_hdr *ctxp; + struct crypto_ctx_hdr *ctxp; if (!ctx) return; - ctxp = (struct ctx_hdr *)((u8 *)ctx - sizeof(struct ctx_hdr)); - dma_pool_free(ctxp->pool, ctxp, ctxp->dma); + ctxp = ctx; + dma_pool_free(ctxp->pool, ctxp->vaddr, ctxp->dma); + kfree(ctxp); } /** diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h index d091b6f5f5dd..d45ff91c19a9 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_req.h +++ b/drivers/crypto/cavium/nitrox/nitrox_req.h @@ -7,6 +7,8 @@ #include "nitrox_dev.h" +#define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL + /** * struct gphdr - General purpose Header * @param0: first parameter. 
@@ -46,13 +48,6 @@ union se_req_ctrl { } s; }; -struct nitrox_sglist { - u16 len; - u16 raz0; - u32 raz1; - dma_addr_t dma; -}; - #define MAX_IV_LEN 16 /** @@ -62,8 +57,10 @@ struct nitrox_sglist { * @ctx_handle: Crypto context handle. * @gph: GP Header * @ctrl: Request Information. - * @in: Input sglist - * @out: Output sglist + * @orh: ORH address + * @comp: completion address + * @src: Input sglist + * @dst: Output sglist */ struct se_crypto_request { u8 opcode; @@ -73,9 +70,8 @@ struct se_crypto_request { struct gphdr gph; union se_req_ctrl ctrl; - - u8 iv[MAX_IV_LEN]; - u16 ivsize; + u64 *orh; + u64 *comp; struct scatterlist *src; struct scatterlist *dst; @@ -181,18 +177,27 @@ struct flexi_crypto_context { struct auth_keys auth; }; +struct crypto_ctx_hdr { + struct dma_pool *pool; + dma_addr_t dma; + void *vaddr; +}; + struct nitrox_crypto_ctx { struct nitrox_device *ndev; union { u64 ctx_handle; struct flexi_crypto_context *fctx; } u; + struct crypto_ctx_hdr *chdr; }; struct nitrox_kcrypt_request { struct se_crypto_request creq; struct nitrox_crypto_ctx *nctx; struct skcipher_request *skreq; + u8 *src; + u8 *dst; }; /** @@ -369,26 +374,19 @@ struct nitrox_sgcomp { /* * strutct nitrox_sgtable - SG list information - * @map_cnt: Number of buffers mapped - * @nr_comp: Number of sglist components + * @sgmap_cnt: Number of buffers mapped * @total_bytes: Total bytes in sglist. - * @len: Total sglist components length. - * @dma: DMA address of sglist component. - * @dir: DMA direction. - * @buf: crypto request buffer. - * @sglist: SG list of input/output buffers. + * @sgcomp_len: Total sglist components length. + * @sgcomp_dma: DMA address of sglist component. + * @sg: crypto request buffer. * @sgcomp: sglist component for NITROX. */ struct nitrox_sgtable { - u8 map_bufs_cnt; - u8 nr_sgcomp; + u8 sgmap_cnt; u16 total_bytes; - u32 len; - dma_addr_t dma; - enum dma_data_direction dir; - - struct scatterlist *buf; - struct nitrox_sglist *sglist; + u32 sgcomp_len; + dma_addr_t sgcomp_dma; + struct scatterlist *sg; struct nitrox_sgcomp *sgcomp; }; @@ -398,10 +396,8 @@ struct nitrox_sgtable { #define COMP_HLEN 8 struct resp_hdr { - u64 orh; - dma_addr_t orh_dma; - u64 completion; - dma_addr_t completion_dma; + u64 *orh; + u64 *completion; }; typedef void (*completion_t)(struct skcipher_request *skreq, int err); @@ -427,7 +423,6 @@ struct nitrox_softreq { u32 flags; gfp_t gfp; atomic_t status; - bool inplace; struct nitrox_device *ndev; struct nitrox_cmdq *cmdq; @@ -443,4 +438,46 @@ struct nitrox_softreq { struct skcipher_request *skreq; }; +static inline void *alloc_req_buf(int nents, int extralen, gfp_t gfp) +{ + size_t size; + + size = sizeof(struct scatterlist) * nents; + size += extralen; + + return kzalloc(size, gfp); +} + +static inline struct scatterlist *create_single_sg(struct scatterlist *sg, + void *buf, int buflen) +{ + sg_set_buf(sg, buf, buflen); + sg++; + return sg; +} + +static inline struct scatterlist *create_multi_sg(struct scatterlist *to_sg, + struct scatterlist *from_sg) +{ + struct scatterlist *sg; + int i; + + for_each_sg(from_sg, sg, sg_nents(from_sg), i) { + sg_set_buf(to_sg, sg_virt(sg), sg->length); + to_sg++; + } + + return to_sg; +} + +static inline void set_orh_value(u64 *orh) +{ + WRITE_ONCE(*orh, PENDING_SIG); +} + +static inline void set_comp_value(u64 *comp) +{ + WRITE_ONCE(*comp, PENDING_SIG); +} + #endif /* __NITROX_REQ_H */ diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c index 3987cd84c033..d566bb904ec2 
100644 --- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c @@ -13,7 +13,6 @@ #define FDATA_SIZE 32 /* Base destination port for the solicited requests */ #define SOLICIT_BASE_DPORT 256 -#define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL #define REQ_NOT_POSTED 1 #define REQ_BACKLOG 2 @@ -52,58 +51,26 @@ static inline int incr_index(int index, int count, int max) return index; } -/** - * dma_free_sglist - unmap and free the sg lists. - * @ndev: N5 device - * @sgtbl: SG table - */ static void softreq_unmap_sgbufs(struct nitrox_softreq *sr) { struct nitrox_device *ndev = sr->ndev; struct device *dev = DEV(ndev); - struct nitrox_sglist *sglist; - - /* unmap in sgbuf */ - sglist = sr->in.sglist; - if (!sglist) - goto out_unmap; - - /* unmap iv */ - dma_unmap_single(dev, sglist->dma, sglist->len, DMA_BIDIRECTIONAL); - /* unmpa src sglist */ - dma_unmap_sg(dev, sr->in.buf, (sr->in.map_bufs_cnt - 1), sr->in.dir); - /* unamp gather component */ - dma_unmap_single(dev, sr->in.dma, sr->in.len, DMA_TO_DEVICE); - kfree(sr->in.sglist); + + + dma_unmap_sg(dev, sr->in.sg, sr->in.sgmap_cnt, DMA_BIDIRECTIONAL); + dma_unmap_single(dev, sr->in.sgcomp_dma, sr->in.sgcomp_len, + DMA_TO_DEVICE); kfree(sr->in.sgcomp); - sr->in.sglist = NULL; - sr->in.buf = NULL; - sr->in.map_bufs_cnt = 0; - -out_unmap: - /* unmap out sgbuf */ - sglist = sr->out.sglist; - if (!sglist) - return; - - /* unmap orh */ - dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir); - - /* unmap dst sglist */ - if (!sr->inplace) { - dma_unmap_sg(dev, sr->out.buf, (sr->out.map_bufs_cnt - 3), - sr->out.dir); - } - /* unmap completion */ - dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir); + sr->in.sg = NULL; + sr->in.sgmap_cnt = 0; - /* unmap scatter component */ - dma_unmap_single(dev, sr->out.dma, sr->out.len, DMA_TO_DEVICE); - kfree(sr->out.sglist); + dma_unmap_sg(dev, sr->out.sg, sr->out.sgmap_cnt, + DMA_BIDIRECTIONAL); + dma_unmap_single(dev, sr->out.sgcomp_dma, sr->out.sgcomp_len, + DMA_TO_DEVICE); kfree(sr->out.sgcomp); - sr->out.sglist = NULL; - sr->out.buf = NULL; - sr->out.map_bufs_cnt = 0; + sr->out.sg = NULL; + sr->out.sgmap_cnt = 0; } static void softreq_destroy(struct nitrox_softreq *sr) @@ -116,7 +83,7 @@ static void softreq_destroy(struct nitrox_softreq *sr) * create_sg_component - create SG componets for N5 device. 
* @sr: Request structure * @sgtbl: SG table - * @nr_comp: total number of components required + * @map_nents: number of dma mapped entries * * Component structure * @@ -140,7 +107,7 @@ static int create_sg_component(struct nitrox_softreq *sr, { struct nitrox_device *ndev = sr->ndev; struct nitrox_sgcomp *sgcomp; - struct nitrox_sglist *sglist; + struct scatterlist *sg; dma_addr_t dma; size_t sz_comp; int i, j, nr_sgcomp; @@ -154,17 +121,15 @@ static int create_sg_component(struct nitrox_softreq *sr, return -ENOMEM; sgtbl->sgcomp = sgcomp; - sgtbl->nr_sgcomp = nr_sgcomp; - sglist = sgtbl->sglist; + sg = sgtbl->sg; /* populate device sg component */ for (i = 0; i < nr_sgcomp; i++) { - for (j = 0; j < 4; j++) { - sgcomp->len[j] = cpu_to_be16(sglist->len); - sgcomp->dma[j] = cpu_to_be64(sglist->dma); - sglist++; + for (j = 0; j < 4 && sg; j++) { + sgcomp[i].len[j] = cpu_to_be16(sg_dma_len(sg)); + sgcomp[i].dma[j] = cpu_to_be64(sg_dma_address(sg)); + sg = sg_next(sg); } - sgcomp++; } /* map the device sg component */ dma = dma_map_single(DEV(ndev), sgtbl->sgcomp, sz_comp, DMA_TO_DEVICE); @@ -174,8 +139,8 @@ static int create_sg_component(struct nitrox_softreq *sr, return -ENOMEM; } - sgtbl->dma = dma; - sgtbl->len = sz_comp; + sgtbl->sgcomp_dma = dma; + sgtbl->sgcomp_len = sz_comp; return 0; } @@ -193,66 +158,27 @@ static int dma_map_inbufs(struct nitrox_softreq *sr, { struct device *dev = DEV(sr->ndev); struct scatterlist *sg = req->src; - struct nitrox_sglist *glist; int i, nents, ret = 0; - dma_addr_t dma; - size_t sz; - nents = sg_nents(req->src); + nents = dma_map_sg(dev, req->src, sg_nents(req->src), + DMA_BIDIRECTIONAL); + if (!nents) + return -EINVAL; - /* creater gather list IV and src entries */ - sz = roundup((1 + nents), 4) * sizeof(*glist); - glist = kzalloc(sz, sr->gfp); - if (!glist) - return -ENOMEM; - - sr->in.sglist = glist; - /* map IV */ - dma = dma_map_single(dev, &req->iv, req->ivsize, DMA_BIDIRECTIONAL); - if (dma_mapping_error(dev, dma)) { - ret = -EINVAL; - goto iv_map_err; - } - - sr->in.dir = (req->src == req->dst) ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE; - /* map src entries */ - nents = dma_map_sg(dev, req->src, nents, sr->in.dir); - if (!nents) { - ret = -EINVAL; - goto src_map_err; - } - sr->in.buf = req->src; - - /* store the mappings */ - glist->len = req->ivsize; - glist->dma = dma; - glist++; - sr->in.total_bytes += req->ivsize; - - for_each_sg(req->src, sg, nents, i) { - glist->len = sg_dma_len(sg); - glist->dma = sg_dma_address(sg); - sr->in.total_bytes += glist->len; - glist++; - } - /* roundup map count to align with entires in sg component */ - sr->in.map_bufs_cnt = (1 + nents); + for_each_sg(req->src, sg, nents, i) + sr->in.total_bytes += sg_dma_len(sg); - /* create NITROX gather component */ - ret = create_sg_component(sr, &sr->in, sr->in.map_bufs_cnt); + sr->in.sg = req->src; + sr->in.sgmap_cnt = nents; + ret = create_sg_component(sr, &sr->in, sr->in.sgmap_cnt); if (ret) goto incomp_err; return 0; incomp_err: - dma_unmap_sg(dev, req->src, nents, sr->in.dir); - sr->in.map_bufs_cnt = 0; -src_map_err: - dma_unmap_single(dev, dma, req->ivsize, DMA_BIDIRECTIONAL); -iv_map_err: - kfree(sr->in.sglist); - sr->in.sglist = NULL; + dma_unmap_sg(dev, req->src, nents, DMA_BIDIRECTIONAL); + sr->in.sgmap_cnt = 0; return ret; } @@ -260,104 +186,25 @@ static int dma_map_outbufs(struct nitrox_softreq *sr, struct se_crypto_request *req) { struct device *dev = DEV(sr->ndev); - struct nitrox_sglist *glist = sr->in.sglist; - struct nitrox_sglist *slist; - struct scatterlist *sg; - int i, nents, map_bufs_cnt, ret = 0; - size_t sz; + int nents, ret = 0; - nents = sg_nents(req->dst); + nents = dma_map_sg(dev, req->dst, sg_nents(req->dst), + DMA_BIDIRECTIONAL); + if (!nents) + return -EINVAL; - /* create scatter list ORH, IV, dst entries and Completion header */ - sz = roundup((3 + nents), 4) * sizeof(*slist); - slist = kzalloc(sz, sr->gfp); - if (!slist) - return -ENOMEM; - - sr->out.sglist = slist; - sr->out.dir = DMA_BIDIRECTIONAL; - /* map ORH */ - sr->resp.orh_dma = dma_map_single(dev, &sr->resp.orh, ORH_HLEN, - sr->out.dir); - if (dma_mapping_error(dev, sr->resp.orh_dma)) { - ret = -EINVAL; - goto orh_map_err; - } - - /* map completion */ - sr->resp.completion_dma = dma_map_single(dev, &sr->resp.completion, - COMP_HLEN, sr->out.dir); - if (dma_mapping_error(dev, sr->resp.completion_dma)) { - ret = -EINVAL; - goto compl_map_err; - } - - sr->inplace = (req->src == req->dst) ? 
true : false; - /* out place */ - if (!sr->inplace) { - nents = dma_map_sg(dev, req->dst, nents, sr->out.dir); - if (!nents) { - ret = -EINVAL; - goto dst_map_err; - } - } - sr->out.buf = req->dst; - - /* store the mappings */ - /* orh */ - slist->len = ORH_HLEN; - slist->dma = sr->resp.orh_dma; - slist++; - - /* copy the glist mappings */ - if (sr->inplace) { - nents = sr->in.map_bufs_cnt - 1; - map_bufs_cnt = sr->in.map_bufs_cnt; - while (map_bufs_cnt--) { - slist->len = glist->len; - slist->dma = glist->dma; - slist++; - glist++; - } - } else { - /* copy iv mapping */ - slist->len = glist->len; - slist->dma = glist->dma; - slist++; - /* copy remaining maps */ - for_each_sg(req->dst, sg, nents, i) { - slist->len = sg_dma_len(sg); - slist->dma = sg_dma_address(sg); - slist++; - } - } - - /* completion */ - slist->len = COMP_HLEN; - slist->dma = sr->resp.completion_dma; - - sr->out.map_bufs_cnt = (3 + nents); - - ret = create_sg_component(sr, &sr->out, sr->out.map_bufs_cnt); + sr->out.sg = req->dst; + sr->out.sgmap_cnt = nents; + ret = create_sg_component(sr, &sr->out, sr->out.sgmap_cnt); if (ret) goto outcomp_map_err; return 0; outcomp_map_err: - if (!sr->inplace) - dma_unmap_sg(dev, req->dst, nents, sr->out.dir); - sr->out.map_bufs_cnt = 0; - sr->out.buf = NULL; -dst_map_err: - dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir); - sr->resp.completion_dma = 0; -compl_map_err: - dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir); - sr->resp.orh_dma = 0; -orh_map_err: - kfree(sr->out.sglist); - sr->out.sglist = NULL; + dma_unmap_sg(dev, req->dst, nents, DMA_BIDIRECTIONAL); + sr->out.sgmap_cnt = 0; + sr->out.sg = NULL; return ret; } @@ -556,8 +403,8 @@ int nitrox_process_se_request(struct nitrox_device *ndev, atomic_set(&sr->status, REQ_NOT_POSTED); - WRITE_ONCE(sr->resp.orh, PENDING_SIG); - WRITE_ONCE(sr->resp.completion, PENDING_SIG); + sr->resp.orh = req->orh; + sr->resp.completion = req->comp; ret = softreq_map_iobuf(sr, req); if (ret) { @@ -598,13 +445,13 @@ int nitrox_process_se_request(struct nitrox_device *ndev, /* fill the packet instruction */ /* word 0 */ - sr->instr.dptr0 = cpu_to_be64(sr->in.dma); + sr->instr.dptr0 = cpu_to_be64(sr->in.sgcomp_dma); /* word 1 */ sr->instr.ih.value = 0; sr->instr.ih.s.g = 1; - sr->instr.ih.s.gsz = sr->in.map_bufs_cnt; - sr->instr.ih.s.ssz = sr->out.map_bufs_cnt; + sr->instr.ih.s.gsz = sr->in.sgmap_cnt; + sr->instr.ih.s.ssz = sr->out.sgmap_cnt; sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr); sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes; sr->instr.ih.value = cpu_to_be64(sr->instr.ih.value); @@ -626,11 +473,11 @@ int nitrox_process_se_request(struct nitrox_device *ndev, /* word 4 */ sr->instr.slc.value[0] = 0; - sr->instr.slc.s.ssz = sr->out.map_bufs_cnt; + sr->instr.slc.s.ssz = sr->out.sgmap_cnt; sr->instr.slc.value[0] = cpu_to_be64(sr->instr.slc.value[0]); /* word 5 */ - sr->instr.slc.s.rptr = cpu_to_be64(sr->out.dma); + sr->instr.slc.s.rptr = cpu_to_be64(sr->out.sgcomp_dma); /* * No conversion for front data, @@ -664,6 +511,11 @@ void backlog_qflush_work(struct work_struct *work) post_backlog_cmds(cmdq); } +static bool sr_completed(struct nitrox_softreq *sr) +{ + return (READ_ONCE(*sr->resp.orh) != READ_ONCE(*sr->resp.completion)); +} + /** * process_request_list - process completed requests * @ndev: N5 device @@ -691,13 +543,13 @@ static void process_response_list(struct nitrox_cmdq *cmdq) break; /* check orh and completion bytes updates */ - if (READ_ONCE(sr->resp.orh) == 
READ_ONCE(sr->resp.completion)) { + if (!sr_completed(sr)) { /* request not completed, check for timeout */ if (!cmd_timeout(sr->tstamp, ndev->timeout)) break; dev_err_ratelimited(DEV(ndev), "Request timeout, orh 0x%016llx\n", - READ_ONCE(sr->resp.orh)); + READ_ONCE(*sr->resp.orh)); } atomic_dec(&cmdq->pending_count); atomic64_inc(&ndev->stats.completed); @@ -710,7 +562,7 @@ static void process_response_list(struct nitrox_cmdq *cmdq) skreq = sr->skreq; /* ORH error code */ - err = READ_ONCE(sr->resp.orh) & 0xff; + err = READ_ONCE(*sr->resp.orh) & 0xff; softreq_destroy(sr); if (callback) diff --git a/drivers/crypto/cavium/nitrox/nitrox_sriov.c b/drivers/crypto/cavium/nitrox/nitrox_sriov.c index 30c0aa874583..7ba0cc5d6d02 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_sriov.c +++ b/drivers/crypto/cavium/nitrox/nitrox_sriov.c @@ -7,6 +7,10 @@ #include "nitrox_common.h" #include "nitrox_isr.h" +/** + * num_vfs_valid - validate VF count + * @num_vfs: number of VF(s) + */ static inline bool num_vfs_valid(int num_vfs) { bool valid = false; @@ -48,7 +52,7 @@ static inline enum vf_mode num_vfs_to_mode(int num_vfs) return mode; } -static void pf_sriov_cleanup(struct nitrox_device *ndev) +static void nitrox_pf_cleanup(struct nitrox_device *ndev) { /* PF has no queues in SR-IOV mode */ atomic_set(&ndev->state, __NDEV_NOT_READY); @@ -60,7 +64,11 @@ static void pf_sriov_cleanup(struct nitrox_device *ndev) nitrox_common_sw_cleanup(ndev); } -static int pf_sriov_init(struct nitrox_device *ndev) +/** + * nitrox_pf_reinit - re-initialize PF resources once SR-IOV is disabled + * @ndev: NITROX device + */ +static int nitrox_pf_reinit(struct nitrox_device *ndev) { int err; @@ -86,6 +94,18 @@ static int pf_sriov_init(struct nitrox_device *ndev) return nitrox_crypto_register(); } +static int nitrox_sriov_init(struct nitrox_device *ndev) +{ + /* register interrupts for PF in SR-IOV */ + return nitrox_sriov_register_interupts(ndev); +} + +static void nitrox_sriov_cleanup(struct nitrox_device *ndev) +{ + /* unregister interrupts for PF in SR-IOV */ + nitrox_sriov_unregister_interrupts(ndev); +} + static int nitrox_sriov_enable(struct pci_dev *pdev, int num_vfs) { struct nitrox_device *ndev = pci_get_drvdata(pdev); @@ -106,17 +126,31 @@ static int nitrox_sriov_enable(struct pci_dev *pdev, int num_vfs) } dev_info(DEV(ndev), "Enabled VF(s) %d\n", num_vfs); - ndev->num_vfs = num_vfs; + ndev->iov.num_vfs = num_vfs; ndev->mode = num_vfs_to_mode(num_vfs); /* set bit in flags */ set_bit(__NDEV_SRIOV_BIT, &ndev->flags); /* cleanup PF resources */ - pf_sriov_cleanup(ndev); + nitrox_pf_cleanup(ndev); - config_nps_core_vfcfg_mode(ndev, ndev->mode); + /* PF SR-IOV mode initialization */ + err = nitrox_sriov_init(ndev); + if (err) + goto iov_fail; + config_nps_core_vfcfg_mode(ndev, ndev->mode); return num_vfs; + +iov_fail: + pci_disable_sriov(pdev); + /* clear bit in flags */ + clear_bit(__NDEV_SRIOV_BIT, &ndev->flags); + ndev->iov.num_vfs = 0; + ndev->mode = __NDEV_MODE_PF; + /* reset back to working mode in PF */ + nitrox_pf_reinit(ndev); + return err; } static int nitrox_sriov_disable(struct pci_dev *pdev) @@ -134,12 +168,15 @@ static int nitrox_sriov_disable(struct pci_dev *pdev) /* clear bit in flags */ clear_bit(__NDEV_SRIOV_BIT, &ndev->flags); - ndev->num_vfs = 0; + ndev->iov.num_vfs = 0; ndev->mode = __NDEV_MODE_PF; + /* cleanup PF SR-IOV resources */ + nitrox_sriov_cleanup(ndev); + config_nps_core_vfcfg_mode(ndev, ndev->mode); - return pf_sriov_init(ndev); + return nitrox_pf_reinit(ndev); } int 
nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs) diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c index 3c6fe57f91f8..9108015e56cc 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c @@ -346,9 +346,7 @@ static int ccp_aes_cmac_cra_init(struct crypto_tfm *tfm) crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_aes_cmac_req_ctx)); - cipher_tfm = crypto_alloc_cipher("aes", 0, - CRYPTO_ALG_ASYNC | - CRYPTO_ALG_NEED_FALLBACK); + cipher_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(cipher_tfm)) { pr_warn("could not load aes cipher driver\n"); return PTR_ERR(cipher_tfm); diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c index 01b82b82f8b8..e0ac376ceb74 100644 --- a/drivers/crypto/ccree/cc_aead.c +++ b/drivers/crypto/ccree/cc_aead.c @@ -58,6 +58,7 @@ struct cc_aead_ctx { unsigned int enc_keylen; unsigned int auth_keylen; unsigned int authsize; /* Actual (reduced?) size of the MAC/ICv */ + unsigned int hash_len; enum drv_cipher_mode cipher_mode; enum cc_flow_mode flow_mode; enum drv_hash_mode auth_mode; @@ -122,6 +123,13 @@ static void cc_aead_exit(struct crypto_aead *tfm) } } +static unsigned int cc_get_aead_hash_len(struct crypto_aead *tfm) +{ + struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); + + return cc_get_default_hash_len(ctx->drvdata); +} + static int cc_aead_init(struct crypto_aead *tfm) { struct aead_alg *alg = crypto_aead_alg(tfm); @@ -196,6 +204,7 @@ static int cc_aead_init(struct crypto_aead *tfm) ctx->auth_state.hmac.ipad_opad = NULL; ctx->auth_state.hmac.padded_authkey = NULL; } + ctx->hash_len = cc_get_aead_hash_len(tfm); return 0; @@ -327,7 +336,7 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx) /* Load the hash current length*/ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], hash_mode); - set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz); + set_din_const(&desc[idx], 0, ctx->hash_len); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); idx++; @@ -465,7 +474,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, /* Load the hash current length*/ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], hashmode); - set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz); + set_din_const(&desc[idx], 0, ctx->hash_len); set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); @@ -1001,7 +1010,7 @@ static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[], hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], hash_mode); set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode), - ctx->drvdata->hash_len_sz); + ctx->hash_len); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); idx++; @@ -1098,7 +1107,7 @@ static void cc_proc_scheme_desc(struct aead_request *req, hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], hash_mode); set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr, - ctx->drvdata->hash_len_sz); + ctx->hash_len); set_flow_mode(&desc[idx], S_HASH_to_DOUT); set_setup_mode(&desc[idx], SETUP_WRITE_STATE1); set_cipher_do(&desc[idx], DO_PAD); @@ -1128,7 +1137,7 @@ static void cc_proc_scheme_desc(struct aead_request *req, hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], hash_mode); set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode), - 
ctx->drvdata->hash_len_sz); + ctx->hash_len); set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index 7623b29911af..989e70f43bd9 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -7,6 +7,7 @@ #include <crypto/internal/skcipher.h> #include <crypto/des.h> #include <crypto/xts.h> +#include <crypto/sm4.h> #include <crypto/scatterwalk.h> #include "cc_driver.h" @@ -83,6 +84,9 @@ static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size) if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE) return 0; break; + case S_DIN_to_SM4: + if (size == SM4_KEY_SIZE) + return 0; default: break; } @@ -122,6 +126,17 @@ static int validate_data_size(struct cc_cipher_ctx *ctx_p, if (IS_ALIGNED(size, DES_BLOCK_SIZE)) return 0; break; + case S_DIN_to_SM4: + switch (ctx_p->cipher_mode) { + case DRV_CIPHER_CTR: + return 0; + case DRV_CIPHER_ECB: + case DRV_CIPHER_CBC: + if (IS_ALIGNED(size, SM4_BLOCK_SIZE)) + return 0; + default: + break; + } default: break; } @@ -522,6 +537,9 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm, case S_DIN_to_DES: flow_mode = DIN_DES_DOUT; break; + case S_DIN_to_SM4: + flow_mode = DIN_SM4_DOUT; + break; default: dev_err(dev, "invalid flow mode, flow_mode = %d\n", flow_mode); return; @@ -1324,6 +1342,54 @@ static const struct cc_alg_template skcipher_algs[] = { .flow_mode = S_DIN_to_DES, .min_hw_rev = CC_HW_REV_630, }, + { + .name = "cbc(sm4)", + .driver_name = "cbc-sm4-ccree", + .blocksize = SM4_BLOCK_SIZE, + .template_skcipher = { + .setkey = cc_cipher_setkey, + .encrypt = cc_cipher_encrypt, + .decrypt = cc_cipher_decrypt, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + }, + .cipher_mode = DRV_CIPHER_CBC, + .flow_mode = S_DIN_to_SM4, + .min_hw_rev = CC_HW_REV_713, + }, + { + .name = "ecb(sm4)", + .driver_name = "ecb-sm4-ccree", + .blocksize = SM4_BLOCK_SIZE, + .template_skcipher = { + .setkey = cc_cipher_setkey, + .encrypt = cc_cipher_encrypt, + .decrypt = cc_cipher_decrypt, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = 0, + }, + .cipher_mode = DRV_CIPHER_ECB, + .flow_mode = S_DIN_to_SM4, + .min_hw_rev = CC_HW_REV_713, + }, + { + .name = "ctr(sm4)", + .driver_name = "ctr-sm4-ccree", + .blocksize = SM4_BLOCK_SIZE, + .template_skcipher = { + .setkey = cc_cipher_setkey, + .encrypt = cc_cipher_encrypt, + .decrypt = cc_cipher_decrypt, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + }, + .cipher_mode = DRV_CIPHER_CTR, + .flow_mode = S_DIN_to_SM4, + .min_hw_rev = CC_HW_REV_713, + }, }; static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl, diff --git a/drivers/crypto/ccree/cc_crypto_ctx.h b/drivers/crypto/ccree/cc_crypto_ctx.h index e032544f4e31..c8dac273c563 100644 --- a/drivers/crypto/ccree/cc_crypto_ctx.h +++ b/drivers/crypto/ccree/cc_crypto_ctx.h @@ -115,7 +115,8 @@ enum drv_hash_mode { DRV_HASH_CBC_MAC = 6, DRV_HASH_XCBC_MAC = 7, DRV_HASH_CMAC = 8, - DRV_HASH_MODE_NUM = 9, + DRV_HASH_SM3 = 9, + DRV_HASH_MODE_NUM = 10, DRV_HASH_RESERVE32B = S32_MAX }; @@ -127,6 +128,7 @@ enum drv_hash_hw_mode { DRV_HASH_HW_SHA512 = 4, DRV_HASH_HW_SHA384 = 12, DRV_HASH_HW_GHASH = 6, + DRV_HASH_HW_SM3 = 14, DRV_HASH_HW_RESERVE32B = S32_MAX }; diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index 
1ff229c2aeab..09a6ada0b547 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c @@ -43,6 +43,10 @@ struct cc_hw_data { /* Hardware revisions defs. */ +static const struct cc_hw_data cc713_hw = { + .name = "713", .rev = CC_HW_REV_713 +}; + static const struct cc_hw_data cc712_hw = { .name = "712", .rev = CC_HW_REV_712, .sig = 0xDCC71200U }; @@ -56,6 +60,7 @@ static const struct cc_hw_data cc630p_hw = { }; static const struct of_device_id arm_ccree_dev_of_match[] = { + { .compatible = "arm,cryptocell-713-ree", .data = &cc713_hw }, { .compatible = "arm,cryptocell-712-ree", .data = &cc712_hw }, { .compatible = "arm,cryptocell-710-ree", .data = &cc710_hw }, { .compatible = "arm,cryptocell-630p-ree", .data = &cc630p_hw }, @@ -206,12 +211,10 @@ static int init_cc_resources(struct platform_device *plat_dev) new_drvdata->hw_rev = hw_rev->rev; if (hw_rev->rev >= CC_HW_REV_712) { - new_drvdata->hash_len_sz = HASH_LEN_SIZE_712; new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP); new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_712); new_drvdata->ver_offset = CC_REG(HOST_VERSION_712); } else { - new_drvdata->hash_len_sz = HASH_LEN_SIZE_630; new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP8); new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_630); new_drvdata->ver_offset = CC_REG(HOST_VERSION_630); @@ -297,15 +300,17 @@ static int init_cc_resources(struct platform_device *plat_dev) return rc; } - /* Verify correct mapping */ - signature_val = cc_ioread(new_drvdata, new_drvdata->sig_offset); - if (signature_val != hw_rev->sig) { - dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n", - signature_val, hw_rev->sig); - rc = -EINVAL; - goto post_clk_err; + if (hw_rev->rev <= CC_HW_REV_712) { + /* Verify correct mapping */ + signature_val = cc_ioread(new_drvdata, new_drvdata->sig_offset); + if (signature_val != hw_rev->sig) { + dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n", + signature_val, hw_rev->sig); + rc = -EINVAL; + goto post_clk_err; + } + dev_dbg(dev, "CC SIGNATURE=0x%08X\n", signature_val); } - dev_dbg(dev, "CC SIGNATURE=0x%08X\n", signature_val); /* Display HW versions */ dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n", @@ -461,6 +466,14 @@ int cc_clk_on(struct cc_drvdata *drvdata) return 0; } +unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata) +{ + if (drvdata->hw_rev >= CC_HW_REV_712) + return HASH_LEN_SIZE_712; + else + return HASH_LEN_SIZE_630; +} + void cc_clk_off(struct cc_drvdata *drvdata) { struct clk *clk = drvdata->clk; diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h index d608a4faf662..170790967854 100644 --- a/drivers/crypto/ccree/cc_driver.h +++ b/drivers/crypto/ccree/cc_driver.h @@ -36,12 +36,13 @@ extern bool cc_dump_desc; extern bool cc_dump_bytes; -#define DRV_MODULE_VERSION "4.0" +#define DRV_MODULE_VERSION "5.0" enum cc_hw_rev { CC_HW_REV_630 = 630, CC_HW_REV_710 = 710, - CC_HW_REV_712 = 712 + CC_HW_REV_712 = 712, + CC_HW_REV_713 = 713 }; #define CC_COHERENT_CACHE_PARAMS 0xEEE @@ -127,7 +128,6 @@ struct cc_drvdata { bool coherent; char *hw_rev_name; enum cc_hw_rev hw_rev; - u32 hash_len_sz; u32 axim_mon_offset; u32 sig_offset; u32 ver_offset; @@ -182,6 +182,7 @@ int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe); void fini_cc_regs(struct cc_drvdata *drvdata); int cc_clk_on(struct cc_drvdata *drvdata); void cc_clk_off(struct cc_drvdata *drvdata); +unsigned int cc_get_default_hash_len(struct cc_drvdata 
*drvdata); static inline void cc_iowrite(struct cc_drvdata *drvdata, u32 reg, u32 val) { diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c index b9313306c36f..c80c9ae555d0 100644 --- a/drivers/crypto/ccree/cc_hash.c +++ b/drivers/crypto/ccree/cc_hash.c @@ -6,6 +6,7 @@ #include <crypto/algapi.h> #include <crypto/hash.h> #include <crypto/md5.h> +#include <crypto/sm3.h> #include <crypto/internal/hash.h> #include "cc_driver.h" @@ -16,6 +17,7 @@ #define CC_MAX_HASH_SEQ_LEN 12 #define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE +#define CC_SM3_HASH_LEN_SIZE 8 struct cc_hash_handle { cc_sram_addr_t digest_len_sram_addr; /* const value in SRAM*/ @@ -43,6 +45,9 @@ static u64 sha384_init[] = { static u64 sha512_init[] = { SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4, SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 }; +static const u32 sm3_init[] = { + SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE, + SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA }; static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[], unsigned int *seq_size); @@ -82,6 +87,7 @@ struct cc_hash_ctx { int hash_mode; int hw_mode; int inter_digestsize; + unsigned int hash_len; struct completion setkey_comp; bool is_hmac; }; @@ -138,10 +144,10 @@ static void cc_init_req(struct device *dev, struct ahash_req_ctx *state, ctx->hash_mode == DRV_HASH_SHA384) memcpy(state->digest_bytes_len, digest_len_sha512_init, - ctx->drvdata->hash_len_sz); + ctx->hash_len); else memcpy(state->digest_bytes_len, digest_len_init, - ctx->drvdata->hash_len_sz); + ctx->hash_len); } if (ctx->hash_mode != DRV_HASH_NULL) { @@ -321,7 +327,7 @@ static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req, /* Get final MAC result */ hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], ctx->hw_mode); + set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode); /* TODO */ set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, 1); @@ -367,7 +373,7 @@ static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req, set_cipher_mode(&desc[idx], ctx->hw_mode); set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, ctx->hash_mode), - ctx->drvdata->hash_len_sz); + ctx->hash_len); set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); @@ -440,7 +446,7 @@ static int cc_hash_digest(struct ahash_request *req) * digest */ hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], ctx->hw_mode); + set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode); if (is_hmac) { set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT); @@ -454,14 +460,14 @@ static int cc_hash_digest(struct ahash_request *req) /* Load the hash current length */ hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], ctx->hw_mode); + set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode); if (is_hmac) { set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr, - ctx->drvdata->hash_len_sz, NS_BIT); + ctx->hash_len, NS_BIT); } else { - set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz); + set_din_const(&desc[idx], 0, ctx->hash_len); if (nbytes) set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); else @@ -478,7 +484,7 @@ static int cc_hash_digest(struct ahash_request *req) hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], ctx->hw_mode); set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, - ctx->drvdata->hash_len_sz, NS_BIT, 0); + ctx->hash_len, NS_BIT, 0); 
set_flow_mode(&desc[idx], S_HASH_to_DOUT); set_setup_mode(&desc[idx], SETUP_WRITE_STATE1); set_cipher_do(&desc[idx], DO_PAD); @@ -504,7 +510,7 @@ static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx, { /* Restore hash digest */ hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], ctx->hw_mode); + set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode); set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT); set_flow_mode(&desc[idx], S_DIN_to_HASH); @@ -513,10 +519,10 @@ static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx, /* Restore hash current length */ hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], ctx->hw_mode); + set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode); set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED); set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr, - ctx->drvdata->hash_len_sz, NS_BIT); + ctx->hash_len, NS_BIT); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); idx++; @@ -576,7 +582,7 @@ static int cc_hash_update(struct ahash_request *req) /* store the hash digest result in context */ hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], ctx->hw_mode); + set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode); set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT, 0); set_flow_mode(&desc[idx], S_HASH_to_DOUT); @@ -585,9 +591,9 @@ static int cc_hash_update(struct ahash_request *req) /* store current hash length in context */ hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], ctx->hw_mode); + set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode); set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr, - ctx->drvdata->hash_len_sz, NS_BIT, 1); + ctx->hash_len, NS_BIT, 1); set_queue_last_ind(ctx->drvdata, &desc[idx]); set_flow_mode(&desc[idx], S_HASH_to_DOUT); set_setup_mode(&desc[idx], SETUP_WRITE_STATE1); @@ -649,9 +655,9 @@ static int cc_do_finup(struct ahash_request *req, bool update) /* Pad the hash */ hw_desc_init(&desc[idx]); set_cipher_do(&desc[idx], DO_PAD); - set_cipher_mode(&desc[idx], ctx->hw_mode); + set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode); set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr, - ctx->drvdata->hash_len_sz, NS_BIT, 0); + ctx->hash_len, NS_BIT, 0); set_setup_mode(&desc[idx], SETUP_WRITE_STATE1); set_flow_mode(&desc[idx], S_HASH_to_DOUT); idx++; @@ -749,7 +755,7 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key, /* Load the hash current length*/ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], ctx->hw_mode); - set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz); + set_din_const(&desc[idx], 0, ctx->hash_len); set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); @@ -831,7 +837,7 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key, /* Load the hash current length*/ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], ctx->hw_mode); - set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz); + set_din_const(&desc[idx], 0, ctx->hash_len); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); idx++; @@ -1069,6 +1075,16 @@ fail: return -ENOMEM; } +static int cc_get_hash_len(struct crypto_tfm *tfm) +{ + struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->hash_mode == DRV_HASH_SM3) + return CC_SM3_HASH_LEN_SIZE; + else + 
return cc_get_default_hash_len(ctx->drvdata); +} + static int cc_cra_init(struct crypto_tfm *tfm) { struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm); @@ -1086,7 +1102,7 @@ static int cc_cra_init(struct crypto_tfm *tfm) ctx->hw_mode = cc_alg->hw_mode; ctx->inter_digestsize = cc_alg->inter_digestsize; ctx->drvdata = cc_alg->drvdata; - + ctx->hash_len = cc_get_hash_len(tfm); return cc_alloc_ctx(ctx); } @@ -1465,8 +1481,8 @@ static int cc_hash_export(struct ahash_request *req, void *out) memcpy(out, state->digest_buff, ctx->inter_digestsize); out += ctx->inter_digestsize; - memcpy(out, state->digest_bytes_len, ctx->drvdata->hash_len_sz); - out += ctx->drvdata->hash_len_sz; + memcpy(out, state->digest_bytes_len, ctx->hash_len); + out += ctx->hash_len; memcpy(out, &curr_buff_cnt, sizeof(u32)); out += sizeof(u32); @@ -1494,8 +1510,8 @@ static int cc_hash_import(struct ahash_request *req, const void *in) memcpy(state->digest_buff, in, ctx->inter_digestsize); in += ctx->inter_digestsize; - memcpy(state->digest_bytes_len, in, ctx->drvdata->hash_len_sz); - in += ctx->drvdata->hash_len_sz; + memcpy(state->digest_bytes_len, in, ctx->hash_len); + in += ctx->hash_len; /* Sanity check the data as much as possible */ memcpy(&tmp, in, sizeof(u32)); @@ -1515,6 +1531,7 @@ struct cc_hash_template { char mac_name[CRYPTO_MAX_ALG_NAME]; char mac_driver_name[CRYPTO_MAX_ALG_NAME]; unsigned int blocksize; + bool is_mac; bool synchronize; struct ahash_alg template_ahash; int hash_mode; @@ -1536,6 +1553,7 @@ static struct cc_hash_template driver_hash[] = { .mac_name = "hmac(sha1)", .mac_driver_name = "hmac-sha1-ccree", .blocksize = SHA1_BLOCK_SIZE, + .is_mac = true, .synchronize = false, .template_ahash = { .init = cc_hash_init, @@ -1562,6 +1580,7 @@ static struct cc_hash_template driver_hash[] = { .mac_name = "hmac(sha256)", .mac_driver_name = "hmac-sha256-ccree", .blocksize = SHA256_BLOCK_SIZE, + .is_mac = true, .template_ahash = { .init = cc_hash_init, .update = cc_hash_update, @@ -1587,6 +1606,7 @@ static struct cc_hash_template driver_hash[] = { .mac_name = "hmac(sha224)", .mac_driver_name = "hmac-sha224-ccree", .blocksize = SHA224_BLOCK_SIZE, + .is_mac = true, .template_ahash = { .init = cc_hash_init, .update = cc_hash_update, @@ -1612,6 +1632,7 @@ static struct cc_hash_template driver_hash[] = { .mac_name = "hmac(sha384)", .mac_driver_name = "hmac-sha384-ccree", .blocksize = SHA384_BLOCK_SIZE, + .is_mac = true, .template_ahash = { .init = cc_hash_init, .update = cc_hash_update, @@ -1637,6 +1658,7 @@ static struct cc_hash_template driver_hash[] = { .mac_name = "hmac(sha512)", .mac_driver_name = "hmac-sha512-ccree", .blocksize = SHA512_BLOCK_SIZE, + .is_mac = true, .template_ahash = { .init = cc_hash_init, .update = cc_hash_update, @@ -1662,6 +1684,7 @@ static struct cc_hash_template driver_hash[] = { .mac_name = "hmac(md5)", .mac_driver_name = "hmac-md5-ccree", .blocksize = MD5_HMAC_BLOCK_SIZE, + .is_mac = true, .template_ahash = { .init = cc_hash_init, .update = cc_hash_update, @@ -1682,9 +1705,34 @@ static struct cc_hash_template driver_hash[] = { .min_hw_rev = CC_HW_REV_630, }, { + .name = "sm3", + .driver_name = "sm3-ccree", + .blocksize = SM3_BLOCK_SIZE, + .is_mac = false, + .template_ahash = { + .init = cc_hash_init, + .update = cc_hash_update, + .final = cc_hash_final, + .finup = cc_hash_finup, + .digest = cc_hash_digest, + .export = cc_hash_export, + .import = cc_hash_import, + .setkey = cc_hash_setkey, + .halg = { + .digestsize = SM3_DIGEST_SIZE, + .statesize = CC_STATE_SIZE(SM3_DIGEST_SIZE), + }, + }, + 
.hash_mode = DRV_HASH_SM3, + .hw_mode = DRV_HASH_HW_SM3, + .inter_digestsize = SM3_DIGEST_SIZE, + .min_hw_rev = CC_HW_REV_713, + }, + { .mac_name = "xcbc(aes)", .mac_driver_name = "xcbc-aes-ccree", .blocksize = AES_BLOCK_SIZE, + .is_mac = true, .template_ahash = { .init = cc_hash_init, .update = cc_mac_update, @@ -1708,6 +1756,7 @@ static struct cc_hash_template driver_hash[] = { .mac_name = "cmac(aes)", .mac_driver_name = "cmac-aes-ccree", .blocksize = AES_BLOCK_SIZE, + .is_mac = true, .template_ahash = { .init = cc_hash_init, .update = cc_mac_update, @@ -1780,6 +1829,7 @@ int cc_init_hash_sram(struct cc_drvdata *drvdata) unsigned int larval_seq_len = 0; struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)]; bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712); + bool sm3_supported = (drvdata->hw_rev >= CC_HW_REV_713); int rc = 0; /* Copy-to-sram digest-len */ @@ -1845,6 +1895,17 @@ int cc_init_hash_sram(struct cc_drvdata *drvdata) sram_buff_ofs += sizeof(sha256_init); larval_seq_len = 0; + if (sm3_supported) { + cc_set_sram_desc(sm3_init, sram_buff_ofs, + ARRAY_SIZE(sm3_init), larval_seq, + &larval_seq_len); + rc = send_request_init(drvdata, larval_seq, larval_seq_len); + if (rc) + goto init_digest_const_err; + sram_buff_ofs += sizeof(sm3_init); + larval_seq_len = 0; + } + if (large_sha_supported) { cc_set_sram_desc((u32 *)sha384_init, sram_buff_ofs, (ARRAY_SIZE(sha384_init) * 2), larval_seq, @@ -1911,6 +1972,9 @@ int cc_hash_alloc(struct cc_drvdata *drvdata) sizeof(sha224_init) + sizeof(sha256_init); + if (drvdata->hw_rev >= CC_HW_REV_713) + sram_size_to_alloc += sizeof(sm3_init); + if (drvdata->hw_rev >= CC_HW_REV_712) sram_size_to_alloc += sizeof(digest_len_sha512_init) + sizeof(sha384_init) + sizeof(sha512_init); @@ -1940,27 +2004,28 @@ int cc_hash_alloc(struct cc_drvdata *drvdata) /* We either support both HASH and MAC or none */ if (driver_hash[alg].min_hw_rev > drvdata->hw_rev) continue; - - /* register hmac version */ - t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true); - if (IS_ERR(t_alg)) { - rc = PTR_ERR(t_alg); - dev_err(dev, "%s alg allocation failed\n", - driver_hash[alg].driver_name); - goto fail; - } - t_alg->drvdata = drvdata; - - rc = crypto_register_ahash(&t_alg->ahash_alg); - if (rc) { - dev_err(dev, "%s alg registration failed\n", - driver_hash[alg].driver_name); - kfree(t_alg); - goto fail; - } else { - list_add_tail(&t_alg->entry, &hash_handle->hash_list); + if (driver_hash[alg].is_mac) { + /* register hmac version */ + t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true); + if (IS_ERR(t_alg)) { + rc = PTR_ERR(t_alg); + dev_err(dev, "%s alg allocation failed\n", + driver_hash[alg].driver_name); + goto fail; + } + t_alg->drvdata = drvdata; + + rc = crypto_register_ahash(&t_alg->ahash_alg); + if (rc) { + dev_err(dev, "%s alg registration failed\n", + driver_hash[alg].driver_name); + kfree(t_alg); + goto fail; + } else { + list_add_tail(&t_alg->entry, + &hash_handle->hash_list); + } } - if (hw_mode == DRV_CIPHER_XCBC_MAC || hw_mode == DRV_CIPHER_CMAC) continue; @@ -2027,7 +2092,7 @@ static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[], XCBC_MAC_K1_OFFSET), CC_AES_128_BIT_KEY_SIZE, NS_BIT); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); - set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC); + set_hash_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC, ctx->hash_mode); set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE); set_flow_mode(&desc[idx], S_DIN_to_AES); @@ 
-2162,6 +2227,8 @@ static const void *cc_larval_digest(struct device *dev, u32 mode) return sha384_init; case DRV_HASH_SHA512: return sha512_init; + case DRV_HASH_SM3: + return sm3_init; default: dev_err(dev, "Invalid hash mode (%d)\n", mode); return md5_init; @@ -2182,6 +2249,8 @@ cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode) struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata; struct cc_hash_handle *hash_handle = _drvdata->hash_handle; struct device *dev = drvdata_to_dev(_drvdata); + bool sm3_supported = (_drvdata->hw_rev >= CC_HW_REV_713); + cc_sram_addr_t addr; switch (mode) { case DRV_HASH_NULL: @@ -2200,19 +2269,31 @@ cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode) sizeof(md5_init) + sizeof(sha1_init) + sizeof(sha224_init)); - case DRV_HASH_SHA384: + case DRV_HASH_SM3: return (hash_handle->larval_digest_sram_addr + sizeof(md5_init) + sizeof(sha1_init) + sizeof(sha224_init) + sizeof(sha256_init)); + case DRV_HASH_SHA384: + addr = (hash_handle->larval_digest_sram_addr + + sizeof(md5_init) + + sizeof(sha1_init) + + sizeof(sha224_init) + + sizeof(sha256_init)); + if (sm3_supported) + addr += sizeof(sm3_init); + return addr; case DRV_HASH_SHA512: - return (hash_handle->larval_digest_sram_addr + + addr = (hash_handle->larval_digest_sram_addr + sizeof(md5_init) + sizeof(sha1_init) + sizeof(sha224_init) + sizeof(sha256_init) + sizeof(sha384_init)); + if (sm3_supported) + addr += sizeof(sm3_init); + return addr; default: dev_err(dev, "Invalid hash mode (%d)\n", mode); } diff --git a/drivers/crypto/ccree/cc_hw_queue_defs.h b/drivers/crypto/ccree/cc_hw_queue_defs.h index 45985b955d2c..7a9b90db7db7 100644 --- a/drivers/crypto/ccree/cc_hw_queue_defs.h +++ b/drivers/crypto/ccree/cc_hw_queue_defs.h @@ -42,6 +42,7 @@ #define WORD3_QUEUE_LAST_IND CC_GENMASK(3, QUEUE_LAST_IND) #define WORD4_ACK_NEEDED CC_GENMASK(4, ACK_NEEDED) #define WORD4_AES_SEL_N_HASH CC_GENMASK(4, AES_SEL_N_HASH) +#define WORD4_AES_XOR_CRYPTO_KEY CC_GENMASK(4, AES_XOR_CRYPTO_KEY) #define WORD4_BYTES_SWAP CC_GENMASK(4, BYTES_SWAP) #define WORD4_CIPHER_CONF0 CC_GENMASK(4, CIPHER_CONF0) #define WORD4_CIPHER_CONF1 CC_GENMASK(4, CIPHER_CONF1) @@ -107,6 +108,7 @@ enum cc_flow_mode { AES_to_AES_to_HASH_and_DOUT = 13, AES_to_AES_to_HASH = 14, AES_to_HASH_and_AES = 15, + DIN_SM4_DOUT = 16, DIN_AES_AESMAC = 17, HASH_to_DOUT = 18, /* setup flows */ @@ -114,9 +116,11 @@ enum cc_flow_mode { S_DIN_to_AES2 = 33, S_DIN_to_DES = 34, S_DIN_to_RC4 = 35, + S_DIN_to_SM4 = 36, S_DIN_to_HASH = 37, S_AES_to_DOUT = 38, S_AES2_to_DOUT = 39, + S_SM4_to_DOUT = 40, S_RC4_to_DOUT = 41, S_DES_to_DOUT = 42, S_HASH_to_DOUT = 43, @@ -394,6 +398,16 @@ static inline void set_aes_not_hash_mode(struct cc_hw_desc *pdesc) } /* + * Set aes xor crypto key, this in some secenrios select SM3 engine + * + * @pdesc: pointer HW descriptor struct + */ +static inline void set_aes_xor_crypto_key(struct cc_hw_desc *pdesc) +{ + pdesc->word[4] |= FIELD_PREP(WORD4_AES_XOR_CRYPTO_KEY, 1); +} + +/* * Set the DOUT field of a HW descriptors to SRAM mode * Note: No need to check SRAM alignment since host requests do not use SRAM and * adaptor will enforce alignment check. @@ -455,6 +469,22 @@ static inline void set_cipher_mode(struct cc_hw_desc *pdesc, int mode) } /* + * Set the cipher mode for hash algorithms. 
+ * + * @pdesc: pointer HW descriptor struct + * @cipher_mode: Any one of the modes defined in [CC7x-DESC] + * @hash_mode: specifies which hash is being handled + */ +static inline void set_hash_cipher_mode(struct cc_hw_desc *pdesc, + enum drv_cipher_mode cipher_mode, + enum drv_hash_mode hash_mode) +{ + set_cipher_mode(pdesc, cipher_mode); + if (hash_mode == DRV_HASH_SM3) + set_aes_xor_crypto_key(pdesc); +} + +/* * Set the cipher configuration fields. * * @pdesc: pointer HW descriptor struct diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index db203f8be429..497c57803de7 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -1311,8 +1311,8 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req) return -ENOSPC; } - err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx], - &skb, CHCR_DECRYPT_OP); + err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx], + &skb, CHCR_DECRYPT_OP); if (err || !skb) return err; skb->dev = u_ctx->lldi.ports[0]; @@ -2008,7 +2008,7 @@ static int chcr_ahash_export(struct ahash_request *areq, void *out) memcpy(state->partial_hash, req_ctx->partial_hash, CHCR_HASH_MAX_DIGEST_SIZE); chcr_init_hctx_per_wr(state); - return 0; + return 0; } static int chcr_ahash_import(struct ahash_request *areq, const void *in) @@ -2249,7 +2249,7 @@ static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type) req->base.complete, req->base.data); aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, req->iv); - aead_request_set_ad(subreq, req->assoclen); + aead_request_set_ad(subreq, req->assoclen); return op_type ? crypto_aead_decrypt(subreq) : crypto_aead_encrypt(subreq); } @@ -3118,12 +3118,12 @@ static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) aeadctx->mayverify = VERIFY_HW; break; case ICV_12: - aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; - aeadctx->mayverify = VERIFY_HW; + aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; + aeadctx->mayverify = VERIFY_HW; break; case ICV_14: - aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3; - aeadctx->mayverify = VERIFY_HW; + aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3; + aeadctx->mayverify = VERIFY_HW; break; case ICV_16: aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c index eb2a0a73cbed..b4c24a35b3d0 100644 --- a/drivers/crypto/geode-aes.c +++ b/drivers/crypto/geode-aes.c @@ -261,7 +261,7 @@ static int fallback_init_cip(struct crypto_tfm *tfm) struct geode_aes_op *op = crypto_tfm_ctx(tfm); op->fallback.cip = crypto_alloc_cipher(name, 0, - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); + CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(op->fallback.cip)) { printk(KERN_ERR "Error allocating fallback algo %s\n", name); diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 3aef1d43e435..d531c14020dc 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -970,7 +970,7 @@ struct safexcel_alg_template safexcel_alg_cbc_des = { .cra_name = "cbc(des)", .cra_driver_name = "safexcel-cbc-des", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1010,7 +1010,7 @@ struct safexcel_alg_template safexcel_alg_ecb_des = { .cra_name 
= "ecb(des)", .cra_driver_name = "safexcel-ecb-des", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1074,7 +1074,7 @@ struct safexcel_alg_template safexcel_alg_cbc_des3_ede = { .cra_name = "cbc(des3_ede)", .cra_driver_name = "safexcel-cbc-des3_ede", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1114,7 +1114,7 @@ struct safexcel_alg_template safexcel_alg_ecb_des3_ede = { .cra_name = "ecb(des3_ede)", .cra_driver_name = "safexcel-ecb-des3_ede", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index 4e6ff32f8a7e..a2105cf33abb 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -20,6 +20,7 @@ #include <linux/of.h> #include <linux/platform_device.h> #include <linux/stmp_device.h> +#include <linux/clk.h> #include <crypto/aes.h> #include <crypto/sha.h> @@ -82,6 +83,7 @@ struct dcp { spinlock_t lock[DCP_MAX_CHANS]; struct task_struct *thread[DCP_MAX_CHANS]; struct crypto_queue queue[DCP_MAX_CHANS]; + struct clk *dcp_clk; }; enum dcp_chan { @@ -1053,11 +1055,24 @@ static int mxs_dcp_probe(struct platform_device *pdev) /* Re-align the structure so it fits the DCP constraints. */ sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT); - /* Restart the DCP block. */ - ret = stmp_reset_block(sdcp->base); + /* DCP clock is optional, only used on some SOCs */ + sdcp->dcp_clk = devm_clk_get(dev, "dcp"); + if (IS_ERR(sdcp->dcp_clk)) { + if (sdcp->dcp_clk != ERR_PTR(-ENOENT)) + return PTR_ERR(sdcp->dcp_clk); + sdcp->dcp_clk = NULL; + } + ret = clk_prepare_enable(sdcp->dcp_clk); if (ret) return ret; + /* Restart the DCP block. */ + ret = stmp_reset_block(sdcp->base); + if (ret) { + dev_err(dev, "Failed reset\n"); + goto err_disable_unprepare_clk; + } + /* Initialize control register. 
*/ writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES | MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf, @@ -1094,7 +1109,8 @@ static int mxs_dcp_probe(struct platform_device *pdev) NULL, "mxs_dcp_chan/sha"); if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) { dev_err(dev, "Error starting SHA thread!\n"); - return PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]); + ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]); + goto err_disable_unprepare_clk; } sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes, @@ -1151,6 +1167,10 @@ err_destroy_aes_thread: err_destroy_sha_thread: kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]); + +err_disable_unprepare_clk: + clk_disable_unprepare(sdcp->dcp_clk); + return ret; } @@ -1170,6 +1190,8 @@ static int mxs_dcp_remove(struct platform_device *pdev) kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]); kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]); + clk_disable_unprepare(sdcp->dcp_clk); + platform_set_drvdata(pdev, NULL); global_sdcp = NULL; diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index a553ffddb11b..4c0ea8142923 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -1222,7 +1222,6 @@ static int omap_aes_probe(struct platform_device *pdev) algp = &dd->pdata->algs_info[i].algs_list[j]; pr_debug("reg alg: %s\n", algp->cra_name); - INIT_LIST_HEAD(&algp->cra_list); err = crypto_register_alg(algp); if (err) @@ -1240,7 +1239,6 @@ static int omap_aes_probe(struct platform_device *pdev) algp = &aalg->base; pr_debug("reg alg: %s\n", algp->cra_name); - INIT_LIST_HEAD(&algp->cra_list); err = crypto_register_aead(aalg); if (err) diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index eb95b0d7f184..6369019219d4 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c @@ -1069,7 +1069,6 @@ static int omap_des_probe(struct platform_device *pdev) algp = &dd->pdata->algs_info[i].algs_list[j]; pr_debug("reg alg: %s\n", algp->cra_name); - INIT_LIST_HEAD(&algp->cra_list); err = crypto_register_alg(algp); if (err) diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c index 585e1cab9ae3..25c13e26d012 100644 --- a/drivers/crypto/qce/ablkcipher.c +++ b/drivers/crypto/qce/ablkcipher.c @@ -376,7 +376,6 @@ static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def, alg->cra_module = THIS_MODULE; alg->cra_init = qce_ablkcipher_init; alg->cra_exit = qce_ablkcipher_exit; - INIT_LIST_HEAD(&alg->cra_list); INIT_LIST_HEAD(&tmpl->entry); tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER; diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index d8a5db11b7ea..fc45f5ea6fdd 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c @@ -508,7 +508,6 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def, base->cra_alignmask = 0; base->cra_module = THIS_MODULE; base->cra_init = qce_ahash_cra_init; - INIT_LIST_HEAD(&base->cra_list); snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index bbf166a97ad3..8c32a3059b4a 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -1321,7 +1321,6 @@ static int sahara_register_algs(struct sahara_dev *dev) unsigned int i, j, k, l; for (i = 0; i < ARRAY_SIZE(aes_algs); i++) { - INIT_LIST_HEAD(&aes_algs[i].cra_list); err = crypto_register_alg(&aes_algs[i]); if (err) goto err_aes_algs; |
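
A minimal, self-contained user-space sketch (not part of the patch) of the ORH/completion-word convention the NITROX request-manager changes above rely on: the caller now supplies the two 64-bit words (req->orh, req->comp), both are seeded with PENDING_SIG, the request is treated as done once the two words differ (sr_completed()), and the low byte of the ORH carries the error code. The struct and function names below are illustrative only; the PENDING_SIG value is the one moved out of nitrox_reqmgr.c by this patch.

/* sketch only -- compile with any C compiler, e.g. gcc -o orh_demo orh_demo.c */
#include <stdint.h>
#include <stdio.h>

#define PENDING_SIG 0xFFFFFFFFFFFFFFFFULL  /* same sentinel the driver writes */

struct fake_resp {
	uint64_t orh;        /* output request header word */
	uint64_t completion; /* completion word */
};

static void seed_pending(struct fake_resp *r)
{
	/* mirrors set_orh_value()/set_comp_value() from nitrox_req.h */
	r->orh = PENDING_SIG;
	r->completion = PENDING_SIG;
}

static int completed(const struct fake_resp *r)
{
	/* mirrors sr_completed(): done once the two words no longer match */
	return r->orh != r->completion;
}

int main(void)
{
	struct fake_resp r;

	seed_pending(&r);
	printf("pending:   completed=%d\n", completed(&r));

	/* pretend the SE core wrote back a result with error code 0 */
	r.orh = 0x0000000000000000ULL;
	printf("finished:  completed=%d, orh error=0x%02x\n",
	       completed(&r), (unsigned int)(r.orh & 0xff));
	return 0;
}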