Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/caam/Kconfig        |  25
-rw-r--r--  drivers/crypto/caam/Makefile       |   4
-rw-r--r--  drivers/crypto/caam/caamalg.c      | 134
-rw-r--r--  drivers/crypto/caam/caamhash.c     |  88
-rw-r--r--  drivers/crypto/caam/caamrng.c      |  29
-rw-r--r--  drivers/crypto/caam/ctrl.c         | 418
-rw-r--r--  drivers/crypto/caam/desc.h         |  17
-rw-r--r--  drivers/crypto/caam/intern.h       |  20
-rw-r--r--  drivers/crypto/caam/jr.c           | 340
-rw-r--r--  drivers/crypto/caam/jr.h           |   5
-rw-r--r--  drivers/crypto/caam/regs.h         |  14
-rw-r--r--  drivers/crypto/caam/sg_sw_sec4.h   |  34
-rw-r--r--  drivers/crypto/dcp.c               |  49
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c     |  30
-rw-r--r--  drivers/crypto/mv_cesa.c           |  14
-rw-r--r--  drivers/crypto/omap-aes.c          |   6
-rw-r--r--  drivers/crypto/omap-sham.c         |   1
-rw-r--r--  drivers/crypto/picoxcell_crypto.c  |  32
-rw-r--r--  drivers/crypto/sahara.c            |   2
-rw-r--r--  drivers/crypto/talitos.c           | 103
-rw-r--r--  drivers/crypto/tegra-aes.c         |  26
21 files changed, 832 insertions(+), 559 deletions(-)
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index ca89f6b84b06..e7555ff4cafd 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -4,16 +4,29 @@ config CRYPTO_DEV_FSL_CAAM
help
Enables the driver module for Freescale's Cryptographic Accelerator
and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
- This module adds a job ring operation interface, and configures h/w
+ This module creates job ring devices, and configures h/w
to operate as a DPAA component automatically, depending
on h/w feature availability.
To compile this driver as a module, choose M here: the module
will be called caam.
+config CRYPTO_DEV_FSL_CAAM_JR
+ tristate "Freescale CAAM Job Ring driver backend"
+ depends on CRYPTO_DEV_FSL_CAAM
+ default y
+ help
+ Enables the driver module for Job Rings which are part of
+ Freescale's Cryptographic Accelerator and Assurance Module
+ (CAAM). This module adds a job ring operation interface.
+
+ To compile this driver as a module, choose M here: the module
+ will be called caam_jr.
+
config CRYPTO_DEV_FSL_CAAM_RINGSIZE
int "Job Ring size"
- depends on CRYPTO_DEV_FSL_CAAM
+ depends on CRYPTO_DEV_FSL_CAAM_JR
range 2 9
default "9"
help
@@ -31,7 +44,7 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
config CRYPTO_DEV_FSL_CAAM_INTC
bool "Job Ring interrupt coalescing"
- depends on CRYPTO_DEV_FSL_CAAM
+ depends on CRYPTO_DEV_FSL_CAAM_JR
default n
help
Enable the Job Ring's interrupt coalescing feature.
@@ -62,7 +75,7 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
tristate "Register algorithm implementations with the Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM
+ depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_ALGAPI
select CRYPTO_AUTHENC
@@ -76,7 +89,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
config CRYPTO_DEV_FSL_CAAM_AHASH_API
tristate "Register hash algorithm implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM
+ depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_HASH
help
@@ -88,7 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
config CRYPTO_DEV_FSL_CAAM_RNG_API
tristate "Register caam device for hwrng API"
- depends on CRYPTO_DEV_FSL_CAAM
+ depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_RNG
select HW_RANDOM
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index d56bd0ec65d8..550758a333e7 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -6,8 +6,10 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
endif
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
-caam-objs := ctrl.o jr.o error.o key_gen.o
+caam-objs := ctrl.o
+caam_jr-objs := jr.o key_gen.o error.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 7c63b72ecd75..4cf5dec826e1 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -86,6 +86,7 @@
#else
#define debug(format, arg...)
#endif
+static struct list_head alg_list;
/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
@@ -817,7 +818,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
ivsize, 1);
print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
- req->cryptlen, 1);
+ req->cryptlen - ctx->authsize, 1);
#endif
if (err) {
@@ -971,12 +972,9 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
(edesc->src_nents ? : 1);
in_options = LDST_SGF;
}
- if (encrypt)
- append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
- req->cryptlen - authsize, in_options);
- else
- append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
- req->cryptlen, in_options);
+
+ append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
+ in_options);
if (likely(req->src == req->dst)) {
if (all_contig) {
@@ -997,7 +995,8 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
}
}
if (encrypt)
- append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
+ out_options);
else
append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
out_options);
@@ -1047,8 +1046,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
in_options = LDST_SGF;
}
- append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
- req->cryptlen - authsize, in_options);
+ append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
+ in_options);
if (contig & GIV_DST_CONTIG) {
dst_dma = edesc->iv_dma;
@@ -1065,7 +1064,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
}
}
- append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
+ append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
+ out_options);
}
/*
@@ -1129,7 +1129,8 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
* allocate and map the aead extended descriptor
*/
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
- int desc_bytes, bool *all_contig_ptr)
+ int desc_bytes, bool *all_contig_ptr,
+ bool encrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -1144,12 +1145,22 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
bool assoc_chained = false, src_chained = false, dst_chained = false;
int ivsize = crypto_aead_ivsize(aead);
int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+ unsigned int authsize = ctx->authsize;
assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
- src_nents = sg_count(req->src, req->cryptlen, &src_chained);
- if (unlikely(req->dst != req->src))
- dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
+ if (unlikely(req->dst != req->src)) {
+ src_nents = sg_count(req->src, req->cryptlen, &src_chained);
+ dst_nents = sg_count(req->dst,
+ req->cryptlen +
+ (encrypt ? authsize : (-authsize)),
+ &dst_chained);
+ } else {
+ src_nents = sg_count(req->src,
+ req->cryptlen +
+ (encrypt ? authsize : 0),
+ &src_chained);
+ }
sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
DMA_TO_DEVICE, assoc_chained);
@@ -1233,11 +1244,9 @@ static int aead_encrypt(struct aead_request *req)
u32 *desc;
int ret = 0;
- req->cryptlen += ctx->authsize;
-
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
- CAAM_CMD_SZ, &all_contig);
+ CAAM_CMD_SZ, &all_contig, true);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1274,7 +1283,7 @@ static int aead_decrypt(struct aead_request *req)
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
- CAAM_CMD_SZ, &all_contig);
+ CAAM_CMD_SZ, &all_contig, false);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1331,7 +1340,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
src_nents = sg_count(req->src, req->cryptlen, &src_chained);
if (unlikely(req->dst != req->src))
- dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
+ dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
+ &dst_chained);
sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
DMA_TO_DEVICE, assoc_chained);
@@ -1425,8 +1435,6 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
u32 *desc;
int ret = 0;
- req->cryptlen += ctx->authsize;
-
/* allocate extended descriptor */
edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
CAAM_CMD_SZ, &contig);
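
The hunks above change the AEAD length convention: req->cryptlen now always means the plaintext length, so aead_encrypt()/aead_givencrypt() no longer mutate it before building the job, and the ICV is accounted for where the output sequence pointer is programmed. A worked sketch of the encrypt-path sequence lengths (helper name and numbers are illustrative):

static void example_aead_lengths(struct aead_request *req,
				 unsigned int ivsize, unsigned int authsize)
{
	/* e.g. assoclen = 16, ivsize = 8, cryptlen = 64, authsize = 12 */
	unsigned int in_len  = req->assoclen + ivsize + req->cryptlen; /* 88 read in */
	unsigned int out_len = req->cryptlen + authsize;               /* 76 written out */
}
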
@@ -2057,7 +2065,6 @@ static struct caam_alg_template driver_algs[] = {
struct caam_crypto_alg {
struct list_head entry;
- struct device *ctrldev;
int class1_alg_type;
int class2_alg_type;
int alg_op;
@@ -2070,14 +2077,12 @@ static int caam_cra_init(struct crypto_tfm *tfm)
struct caam_crypto_alg *caam_alg =
container_of(alg, struct caam_crypto_alg, crypto_alg);
struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
- struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
- int tgt_jr = atomic_inc_return(&priv->tfm_count);
- /*
- * distribute tfms across job rings to ensure in-order
- * crypto request processing per tfm
- */
- ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs];
+ ctx->jrdev = caam_jr_alloc();
+ if (IS_ERR(ctx->jrdev)) {
+ pr_err("Job Ring Device allocation for transform failed\n");
+ return PTR_ERR(ctx->jrdev);
+ }
/* copy descriptor header template value */
ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
@@ -2104,44 +2109,26 @@ static void caam_cra_exit(struct crypto_tfm *tfm)
dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
desc_bytes(ctx->sh_desc_givenc),
DMA_TO_DEVICE);
+
+ caam_jr_free(ctx->jrdev);
}
static void __exit caam_algapi_exit(void)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
struct caam_crypto_alg *t_alg, *n;
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev)
- return;
-
- ctrldev = &pdev->dev;
- of_node_put(dev_node);
- priv = dev_get_drvdata(ctrldev);
-
- if (!priv->alg_list.next)
+ if (!alg_list.next)
return;
- list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
+ list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
crypto_unregister_alg(&t_alg->crypto_alg);
list_del(&t_alg->entry);
kfree(t_alg);
}
}
-static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
- struct caam_alg_template
+static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
*template)
{
struct caam_crypto_alg *t_alg;
@@ -2149,7 +2136,7 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
if (!t_alg) {
- dev_err(ctrldev, "failed to allocate t_alg\n");
+ pr_err("failed to allocate t_alg\n");
return ERR_PTR(-ENOMEM);
}
@@ -2181,62 +2168,39 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
t_alg->class1_alg_type = template->class1_alg_type;
t_alg->class2_alg_type = template->class2_alg_type;
t_alg->alg_op = template->alg_op;
- t_alg->ctrldev = ctrldev;
return t_alg;
}
static int __init caam_algapi_init(void)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
int i = 0, err = 0;
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return -ENODEV;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev)
- return -ENODEV;
-
- ctrldev = &pdev->dev;
- priv = dev_get_drvdata(ctrldev);
- of_node_put(dev_node);
-
- INIT_LIST_HEAD(&priv->alg_list);
-
- atomic_set(&priv->tfm_count, -1);
+ INIT_LIST_HEAD(&alg_list);
/* register crypto algorithms the device supports */
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
/* TODO: check if h/w supports alg */
struct caam_crypto_alg *t_alg;
- t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
+ t_alg = caam_alg_alloc(&driver_algs[i]);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
- dev_warn(ctrldev, "%s alg allocation failed\n",
- driver_algs[i].driver_name);
+ pr_warn("%s alg allocation failed\n",
+ driver_algs[i].driver_name);
continue;
}
err = crypto_register_alg(&t_alg->crypto_alg);
if (err) {
- dev_warn(ctrldev, "%s alg registration failed\n",
+ pr_warn("%s alg registration failed\n",
t_alg->crypto_alg.cra_driver_name);
kfree(t_alg);
} else
- list_add_tail(&t_alg->entry, &priv->alg_list);
+ list_add_tail(&t_alg->entry, &alg_list);
}
- if (!list_empty(&priv->alg_list))
- dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
- (char *)of_get_property(dev_node, "compatible", NULL));
+ if (!list_empty(&alg_list))
+ pr_info("caam algorithms registered in /proc/crypto\n");
return err;
}
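
With the JR backend split out, tfm init no longer reaches into the controller's private data to pick a ring; it leases one from the JR driver and returns it on exit. A minimal sketch of the resulting pattern, assuming the usual caam_ctx layout (example_* names are hypothetical):

static int example_cra_init(struct crypto_tfm *tfm)
{
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	/* lease the least-loaded job ring from the JR backend */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev))
		return PTR_ERR(ctx->jrdev);

	return 0;
}

static void example_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	/* drop the tfm count taken on this ring at init time */
	caam_jr_free(ctx->jrdev);
}
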
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index e732bd962e98..0378328f47a7 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -94,6 +94,9 @@
#define debug(format, arg...)
#endif
+
+static struct list_head hash_list;
+
/* ahash per-session context */
struct caam_hash_ctx {
struct device *jrdev;
@@ -1653,7 +1656,6 @@ static struct caam_hash_template driver_hash[] = {
struct caam_hash_alg {
struct list_head entry;
- struct device *ctrldev;
int alg_type;
int alg_op;
struct ahash_alg ahash_alg;
@@ -1670,7 +1672,6 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
struct caam_hash_alg *caam_hash =
container_of(alg, struct caam_hash_alg, ahash_alg);
struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- struct caam_drv_private *priv = dev_get_drvdata(caam_hash->ctrldev);
/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
HASH_MSG_LEN + SHA1_DIGEST_SIZE,
@@ -1678,15 +1679,17 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
HASH_MSG_LEN + SHA256_DIGEST_SIZE,
HASH_MSG_LEN + 64,
HASH_MSG_LEN + SHA512_DIGEST_SIZE };
- int tgt_jr = atomic_inc_return(&priv->tfm_count);
int ret = 0;
/*
- * distribute tfms across job rings to ensure in-order
+ * Get a Job ring from Job Ring driver to ensure in-order
* crypto request processing per tfm
*/
- ctx->jrdev = priv->jrdev[tgt_jr % priv->total_jobrs];
-
+ ctx->jrdev = caam_jr_alloc();
+ if (IS_ERR(ctx->jrdev)) {
+ pr_err("Job Ring Device allocation for transform failed\n");
+ return PTR_ERR(ctx->jrdev);
+ }
/* copy descriptor header template value */
ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
@@ -1729,35 +1732,18 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
!dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
+
+ caam_jr_free(ctx->jrdev);
}
static void __exit caam_algapi_hash_exit(void)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
struct caam_hash_alg *t_alg, *n;
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev)
+ if (!hash_list.next)
return;
- ctrldev = &pdev->dev;
- of_node_put(dev_node);
- priv = dev_get_drvdata(ctrldev);
-
- if (!priv->hash_list.next)
- return;
-
- list_for_each_entry_safe(t_alg, n, &priv->hash_list, entry) {
+ list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
crypto_unregister_ahash(&t_alg->ahash_alg);
list_del(&t_alg->entry);
kfree(t_alg);
@@ -1765,7 +1751,7 @@ static void __exit caam_algapi_hash_exit(void)
}
static struct caam_hash_alg *
-caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
+caam_hash_alloc(struct caam_hash_template *template,
bool keyed)
{
struct caam_hash_alg *t_alg;
@@ -1774,7 +1760,7 @@ caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
if (!t_alg) {
- dev_err(ctrldev, "failed to allocate t_alg\n");
+ pr_err("failed to allocate t_alg\n");
return ERR_PTR(-ENOMEM);
}
@@ -1805,37 +1791,15 @@ caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
t_alg->alg_type = template->alg_type;
t_alg->alg_op = template->alg_op;
- t_alg->ctrldev = ctrldev;
return t_alg;
}
static int __init caam_algapi_hash_init(void)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
int i = 0, err = 0;
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return -ENODEV;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev)
- return -ENODEV;
-
- ctrldev = &pdev->dev;
- priv = dev_get_drvdata(ctrldev);
- of_node_put(dev_node);
-
- INIT_LIST_HEAD(&priv->hash_list);
-
- atomic_set(&priv->tfm_count, -1);
+ INIT_LIST_HEAD(&hash_list);
/* register crypto algorithms the device supports */
for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
@@ -1843,38 +1807,38 @@ static int __init caam_algapi_hash_init(void)
struct caam_hash_alg *t_alg;
/* register hmac version */
- t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], true);
+ t_alg = caam_hash_alloc(&driver_hash[i], true);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
- dev_warn(ctrldev, "%s alg allocation failed\n",
- driver_hash[i].driver_name);
+ pr_warn("%s alg allocation failed\n",
+ driver_hash[i].driver_name);
continue;
}
err = crypto_register_ahash(&t_alg->ahash_alg);
if (err) {
- dev_warn(ctrldev, "%s alg registration failed\n",
+ pr_warn("%s alg registration failed\n",
t_alg->ahash_alg.halg.base.cra_driver_name);
kfree(t_alg);
} else
- list_add_tail(&t_alg->entry, &priv->hash_list);
+ list_add_tail(&t_alg->entry, &hash_list);
/* register unkeyed version */
- t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], false);
+ t_alg = caam_hash_alloc(&driver_hash[i], false);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
- dev_warn(ctrldev, "%s alg allocation failed\n",
- driver_hash[i].driver_name);
+ pr_warn("%s alg allocation failed\n",
+ driver_hash[i].driver_name);
continue;
}
err = crypto_register_ahash(&t_alg->ahash_alg);
if (err) {
- dev_warn(ctrldev, "%s alg registration failed\n",
+ pr_warn("%s alg registration failed\n",
t_alg->ahash_alg.halg.base.cra_driver_name);
kfree(t_alg);
} else
- list_add_tail(&t_alg->entry, &priv->hash_list);
+ list_add_tail(&t_alg->entry, &hash_list);
}
return err;
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index d1939a9539c0..28486b19fc36 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -273,34 +273,23 @@ static struct hwrng caam_rng = {
static void __exit caam_rng_exit(void)
{
+ caam_jr_free(rng_ctx.jrdev);
hwrng_unregister(&caam_rng);
}
static int __init caam_rng_init(void)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
-
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return -ENODEV;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev)
- return -ENODEV;
+ struct device *dev;
- ctrldev = &pdev->dev;
- priv = dev_get_drvdata(ctrldev);
- of_node_put(dev_node);
+ dev = caam_jr_alloc();
+ if (IS_ERR(dev)) {
+ pr_err("Job Ring Device allocation for RNG failed\n");
+ return PTR_ERR(dev);
+ }
- caam_init_rng(&rng_ctx, priv->jrdev[0]);
+ caam_init_rng(&rng_ctx, dev);
- dev_info(priv->jrdev[0], "registering rng-caam\n");
+ dev_info(dev, "registering rng-caam\n");
return hwrng_register(&caam_rng);
}
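
For readability, the caamrng init path after the hunks above collapses to the following; this is a consolidated view of the result under the same assumptions as the patch, not additional code:

static int __init caam_rng_init(void)
{
	struct device *dev = caam_jr_alloc();	/* lease a job ring */

	if (IS_ERR(dev)) {
		pr_err("Job Ring Device allocation for RNG failed\n");
		return PTR_ERR(dev);
	}

	caam_init_rng(&rng_ctx, dev);
	dev_info(dev, "registering rng-caam\n");
	return hwrng_register(&caam_rng);
}
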
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index bc6d820812b6..63fb1af2c431 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -16,82 +16,75 @@
#include "error.h"
#include "ctrl.h"
-static int caam_remove(struct platform_device *pdev)
-{
- struct device *ctrldev;
- struct caam_drv_private *ctrlpriv;
- struct caam_drv_private_jr *jrpriv;
- struct caam_full __iomem *topregs;
- int ring, ret = 0;
-
- ctrldev = &pdev->dev;
- ctrlpriv = dev_get_drvdata(ctrldev);
- topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
-
- /* shut down JobRs */
- for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
- ret |= caam_jr_shutdown(ctrlpriv->jrdev[ring]);
- jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
- irq_dispose_mapping(jrpriv->irq);
- }
-
- /* Shut down debug views */
-#ifdef CONFIG_DEBUG_FS
- debugfs_remove_recursive(ctrlpriv->dfs_root);
-#endif
-
- /* Unmap controller region */
- iounmap(&topregs->ctrl);
-
- kfree(ctrlpriv->jrdev);
- kfree(ctrlpriv);
-
- return ret;
-}
-
/*
* Descriptor to instantiate RNG State Handle 0 in normal mode and
* load the JDKEK, TDKEK and TDSK registers
*/
-static void build_instantiation_desc(u32 *desc)
+static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
{
- u32 *jump_cmd;
+ u32 *jump_cmd, op_flags;
init_job_desc(desc, 0);
+ op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+ (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;
+
/* INIT RNG in non-test mode */
- append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
- OP_ALG_AS_INIT);
+ append_operation(desc, op_flags);
+
+ if (!handle && do_sk) {
+ /*
+ * For SH0, Secure Keys must be generated as well
+ */
+
+ /* wait for done */
+ jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
+ set_jump_tgt_here(desc, jump_cmd);
+
+ /*
+ * load 1 to clear written reg:
+ * resets the done interrupt and returns the RNG to idle.
+ */
+ append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
+
+ /* Initialize State Handle */
+ append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+ OP_ALG_AAI_RNG4_SK);
+ }
- /* wait for done */
- jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
- set_jump_tgt_here(desc, jump_cmd);
+ append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
+}
- /*
- * load 1 to clear written reg:
- * resets the done interrupt and returns the RNG to idle.
- */
- append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
+/* Descriptor for deinstantiation of State Handle 0 of the RNG block. */
+static void build_deinstantiation_desc(u32 *desc, int handle)
+{
+ init_job_desc(desc, 0);
- /* generate secure keys (non-test) */
+ /* Uninstantiate State Handle 0 */
append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
- OP_ALG_RNG4_SK);
+ (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL);
+
+ append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
}
-static int instantiate_rng(struct device *ctrldev)
+/*
+ * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
+ * the software (no JR/QI used).
+ * @ctrldev - pointer to device
+ * @status - descriptor status, after being run
+ *
+ * Return: - 0 if no error occurred
+ * - -ENODEV if the DECO couldn't be acquired
+ * - -EAGAIN if an error occurred while executing the descriptor
+ */
+static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
+ u32 *status)
{
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
struct caam_full __iomem *topregs;
unsigned int timeout = 100000;
- u32 *desc;
- int i, ret = 0;
-
- desc = kmalloc(CAAM_CMD_SZ * 6, GFP_KERNEL | GFP_DMA);
- if (!desc) {
- dev_err(ctrldev, "can't allocate RNG init descriptor memory\n");
- return -ENOMEM;
- }
- build_instantiation_desc(desc);
+ u32 deco_dbg_reg, flags;
+ int i;
/* Set the bit to request direct access to DECO0 */
topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
@@ -103,36 +96,219 @@ static int instantiate_rng(struct device *ctrldev)
if (!timeout) {
dev_err(ctrldev, "failed to acquire DECO 0\n");
- ret = -EIO;
- goto out;
+ clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
+ return -ENODEV;
}
for (i = 0; i < desc_len(desc); i++)
- topregs->deco.descbuf[i] = *(desc + i);
+ wr_reg32(&topregs->deco.descbuf[i], *(desc + i));
+
+ flags = DECO_JQCR_WHL;
+ /*
+ * If the descriptor length is longer than 4 words, then the
+ * FOUR bit in JRCTRL register must be set.
+ */
+ if (desc_len(desc) >= 4)
+ flags |= DECO_JQCR_FOUR;
- wr_reg32(&topregs->deco.jr_ctl_hi, DECO_JQCR_WHL | DECO_JQCR_FOUR);
+ /* Instruct the DECO to execute it */
+ wr_reg32(&topregs->deco.jr_ctl_hi, flags);
timeout = 10000000;
- while ((rd_reg32(&topregs->deco.desc_dbg) & DECO_DBG_VALID) &&
- --timeout)
+ do {
+ deco_dbg_reg = rd_reg32(&topregs->deco.desc_dbg);
+ /*
+ * If an error occurred in the descriptor, then
+ * the DECO status field will be set to 0x0D
+ */
+ if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
+ DESC_DBG_DECO_STAT_HOST_ERR)
+ break;
cpu_relax();
+ } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
- if (!timeout) {
- dev_err(ctrldev, "failed to instantiate RNG\n");
- ret = -EIO;
- }
+ *status = rd_reg32(&topregs->deco.op_status_hi) &
+ DECO_OP_STATUS_HI_ERR_MASK;
+ /* Mark the DECO as free */
clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
-out:
+
+ if (!timeout)
+ return -EAGAIN;
+
+ return 0;
+}
+
+/*
+ * instantiate_rng - builds and executes a descriptor on DECO0,
+ * which initializes the RNG block.
+ * @ctrldev - pointer to device
+ * @state_handle_mask - bitmask containing the instantiation status
+ * for the RNG4 state handles which exist in
+ * the RNG4 block: 1 if it's been instantiated
+ * by an external entry, 0 otherwise.
+ * @gen_sk - generate data to be loaded into the JDKEK, TDKEK and TDSK;
+ * Caution: this can be done only once; if the keys need to be
+ * regenerated, a POR is required
+ *
+ * Return: - 0 if no error occurred
+ * - -ENOMEM if there isn't enough memory to allocate the descriptor
+ * - -ENODEV if DECO0 couldn't be acquired
+ * - -EAGAIN if an error occurred when executing the descriptor
+ * e.g. there was an RNG hardware error due to not "good enough"
+ * entropy being acquired.
+ */
+static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
+ int gen_sk)
+{
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+ struct caam_full __iomem *topregs;
+ struct rng4tst __iomem *r4tst;
+ u32 *desc, status, rdsta_val;
+ int ret = 0, sh_idx;
+
+ topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+ r4tst = &topregs->ctrl.r4tst[0];
+
+ desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
+ /*
+ * If the corresponding bit is set, this state handle
+ * was initialized by somebody else, so it's left alone.
+ */
+ if ((1 << sh_idx) & state_handle_mask)
+ continue;
+
+ /* Create the descriptor for instantiating RNG State Handle */
+ build_instantiation_desc(desc, sh_idx, gen_sk);
+
+ /* Try to run it through DECO0 */
+ ret = run_descriptor_deco0(ctrldev, desc, &status);
+
+ /*
+ * If ret is not 0, or descriptor status is not 0, then
+ * something went wrong. No need to try the next state
+ * handle (if available), bail out here.
+ * Also, if for some reason, the State Handle didn't get
+ * instantiated although the descriptor has finished
+ * without any error (HW optimizations for later
+ * CAAM eras), then try again.
+ */
+ rdsta_val =
+ rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IFMASK;
+ if (status || !(rdsta_val & (1 << sh_idx)))
+ ret = -EAGAIN;
+ if (ret)
+ break;
+
+ dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
+ /* Clear the contents before recreating the descriptor */
+ memset(desc, 0x00, CAAM_CMD_SZ * 7);
+ }
+
kfree(desc);
+
return ret;
}
/*
- * By default, the TRNG runs for 200 clocks per sample;
- * 1600 clocks per sample generates better entropy.
+ * deinstantiate_rng - builds and executes a descriptor on DECO0,
+ * which deinitializes the RNG block.
+ * @ctrldev - pointer to device
+ * @state_handle_mask - bitmask containing the instantiation status
+ * for the RNG4 state handles which exist in
+ * the RNG4 block: 1 if it's been instantiated
+ *
+ * Return: - 0 if no error occurred
+ * - -ENOMEM if there isn't enough memory to allocate the descriptor
+ * - -ENODEV if DECO0 couldn't be acquired
+ * - -EAGAIN if an error occurred when executing the descriptor
*/
-static void kick_trng(struct platform_device *pdev)
+static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
+{
+ u32 *desc, status;
+ int sh_idx, ret = 0;
+
+ desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
+ /*
+ * If the corresponding bit is set, then it means the state
+ * handle was initialized by us, and thus it needs to be
+ * deintialized as well
+ */
+ if ((1 << sh_idx) & state_handle_mask) {
+ /*
+ * Create the descriptor for deinstantiating this state
+ * handle
+ */
+ build_deinstantiation_desc(desc, sh_idx);
+
+ /* Try to run it through DECO0 */
+ ret = run_descriptor_deco0(ctrldev, desc, &status);
+
+ if (ret || status) {
+ dev_err(ctrldev,
+ "Failed to deinstantiate RNG4 SH%d\n",
+ sh_idx);
+ break;
+ }
+ dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
+ }
+ }
+
+ kfree(desc);
+
+ return ret;
+}
+
+static int caam_remove(struct platform_device *pdev)
+{
+ struct device *ctrldev;
+ struct caam_drv_private *ctrlpriv;
+ struct caam_full __iomem *topregs;
+ int ring, ret = 0;
+
+ ctrldev = &pdev->dev;
+ ctrlpriv = dev_get_drvdata(ctrldev);
+ topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+
+ /* Remove platform devices for JobRs */
+ for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
+ if (ctrlpriv->jrpdev[ring])
+ of_device_unregister(ctrlpriv->jrpdev[ring]);
+ }
+
+ /* De-initialize RNG state handles initialized by this driver. */
+ if (ctrlpriv->rng4_sh_init)
+ deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
+
+ /* Shut down debug views */
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(ctrlpriv->dfs_root);
+#endif
+
+ /* Unmap controller region */
+ iounmap(&topregs->ctrl);
+
+ kfree(ctrlpriv->jrpdev);
+ kfree(ctrlpriv);
+
+ return ret;
+}
+
+/*
+ * kick_trng - sets the various parameters for enabling the initialization
+ * of the RNG4 block in CAAM
+ * @pdev - pointer to the platform device
+ * @ent_delay - Defines the length (in system clocks) of each entropy sample.
+ */
+static void kick_trng(struct platform_device *pdev, int ent_delay)
{
struct device *ctrldev = &pdev->dev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
@@ -145,14 +321,31 @@ static void kick_trng(struct platform_device *pdev)
/* put RNG4 into program mode */
setbits32(&r4tst->rtmctl, RTMCTL_PRGM);
- /* 1600 clocks per sample */
+
+ /*
+ * Performance-wise, it does not make sense to
+ * set the delay to a value that is lower
+ * than the last one that worked (i.e. the state handles
+ * were instantiated properly). Thus, instead of wasting
+ * time trying to set the values controlling the sample
+ * frequency, the function simply returns.
+ */
+ val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
+ >> RTSDCTL_ENT_DLY_SHIFT;
+ if (ent_delay <= val) {
+ /* put RNG4 into run mode */
+ clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
+ return;
+ }
+
val = rd_reg32(&r4tst->rtsdctl);
- val = (val & ~RTSDCTL_ENT_DLY_MASK) | (1600 << RTSDCTL_ENT_DLY_SHIFT);
+ val = (val & ~RTSDCTL_ENT_DLY_MASK) |
+ (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
wr_reg32(&r4tst->rtsdctl, val);
- /* min. freq. count */
- wr_reg32(&r4tst->rtfrqmin, 400);
- /* max. freq. count */
- wr_reg32(&r4tst->rtfrqmax, 6400);
+ /* min. freq. count, equal to 1/4 of the entropy sample length */
+ wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
+ /* max. freq. count, equal to 8 times the entropy sample length */
+ wr_reg32(&r4tst->rtfrqmax, ent_delay << 3);
/* put RNG4 into run mode */
clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
}
@@ -193,7 +386,7 @@ EXPORT_SYMBOL(caam_get_era);
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
- int ret, ring, rspec;
+ int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
u64 caam_id;
struct device *dev;
struct device_node *nprop, *np;
@@ -258,8 +451,9 @@ static int caam_probe(struct platform_device *pdev)
rspec++;
}
- ctrlpriv->jrdev = kzalloc(sizeof(struct device *) * rspec, GFP_KERNEL);
- if (ctrlpriv->jrdev == NULL) {
+ ctrlpriv->jrpdev = kzalloc(sizeof(struct platform_device *) * rspec,
+ GFP_KERNEL);
+ if (ctrlpriv->jrpdev == NULL) {
iounmap(&topregs->ctrl);
return -ENOMEM;
}
@@ -267,13 +461,24 @@ static int caam_probe(struct platform_device *pdev)
ring = 0;
ctrlpriv->total_jobrs = 0;
for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") {
- caam_jr_probe(pdev, np, ring);
+ ctrlpriv->jrpdev[ring] =
+ of_platform_device_create(np, NULL, dev);
+ if (!ctrlpriv->jrpdev[ring]) {
+ pr_warn("JR%d Platform device creation error\n", ring);
+ continue;
+ }
ctrlpriv->total_jobrs++;
ring++;
}
if (!ring) {
for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") {
- caam_jr_probe(pdev, np, ring);
+ ctrlpriv->jrpdev[ring] =
+ of_platform_device_create(np, NULL, dev);
+ if (!ctrlpriv->jrpdev[ring]) {
+ pr_warn("JR%d Platform device creation error\n",
+ ring);
+ continue;
+ }
ctrlpriv->total_jobrs++;
ring++;
}
@@ -299,16 +504,55 @@ static int caam_probe(struct platform_device *pdev)
/*
* If SEC has RNG version >= 4 and RNG state handle has not been
- * already instantiated ,do RNG instantiation
+ * already instantiated, do RNG instantiation
*/
- if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4 &&
- !(rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IF0)) {
- kick_trng(pdev);
- ret = instantiate_rng(dev);
+ if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4) {
+ ctrlpriv->rng4_sh_init =
+ rd_reg32(&topregs->ctrl.r4tst[0].rdsta);
+ /*
+ * If the secure keys (TDKEK, JDKEK, TDSK), were already
+ * generated, signal this to the function that is instantiating
+ * the state handles. An error would occur if RNG4 attempts
+ * to regenerate these keys before the next POR.
+ */
+ gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
+ ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
+ do {
+ int inst_handles =
+ rd_reg32(&topregs->ctrl.r4tst[0].rdsta) &
+ RDSTA_IFMASK;
+ /*
+ * If either SH were instantiated by somebody else
+ * (e.g. u-boot) then it is assumed that the entropy
+ * parameters are properly set and thus the function
+ * setting these (kick_trng(...)) is skipped.
+ * Also, if a handle was instantiated, do not change
+ * the TRNG parameters.
+ */
+ if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
+ kick_trng(pdev, ent_delay);
+ ent_delay += 400;
+ }
+ /*
+ * if instantiate_rng(...) fails, the loop will rerun
+ * and the kick_trng(...) function will modify the
+ * upper and lower limits of the entropy sampling
+ * interval, leading to a successful initialization of
+ * the RNG.
+ */
+ ret = instantiate_rng(dev, inst_handles,
+ gen_sk);
+ } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
if (ret) {
+ dev_err(dev, "failed to instantiate RNG\n");
caam_remove(pdev);
return ret;
}
+ /*
+ * Set handles init'ed by this module as the complement of the
+ * already initialized ones
+ */
+ ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;
/* Enable RDB bit so that RNG works faster */
setbits32(&topregs->ctrl.scfgr, SCFGR_RDBENABLE);
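
A small sketch of the entropy-delay arithmetic driving the probe loop above, using the constants this patch adds to regs.h further down (helper name is hypothetical):

static void example_trng_window(void)
{
	int ent_delay = RTSDCTL_ENT_DLY_MIN;	/* first attempt: 1200 clocks */

	/* kick_trng() derives both frequency-count limits from the delay */
	u32 rtfrqmin = ent_delay >> 2;	/* 300:  1/4 of the sample length */
	u32 rtfrqmax = ent_delay << 3;	/* 9600: 8x the sample length */

	/* each -EAGAIN adds 400 clocks, up to RTSDCTL_ENT_DLY_MAX (12800) */
}
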
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 53b296f78b0d..7e4500f18df6 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -1155,8 +1155,15 @@ struct sec4_sg_entry {
/* randomizer AAI set */
#define OP_ALG_AAI_RNG (0x00 << OP_ALG_AAI_SHIFT)
-#define OP_ALG_AAI_RNG_NOZERO (0x10 << OP_ALG_AAI_SHIFT)
-#define OP_ALG_AAI_RNG_ODD (0x20 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_NZB (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_OBP (0x20 << OP_ALG_AAI_SHIFT)
+
+/* RNG4 AAI set */
+#define OP_ALG_AAI_RNG4_SH_0 (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_SH_1 (0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_PS (0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT)
/* hmac/smac AAI set */
#define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT)
@@ -1178,12 +1185,6 @@ struct sec4_sg_entry {
#define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT)
#define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT)
-/* RNG4 set */
-#define OP_ALG_RNG4_SHIFT 4
-#define OP_ALG_RNG4_MASK (0x1f3 << OP_ALG_RNG4_SHIFT)
-
-#define OP_ALG_RNG4_SK (0x100 << OP_ALG_RNG4_SHIFT)
-
#define OP_ALG_AS_SHIFT 2
#define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT)
#define OP_ALG_AS_UPDATE (0 << OP_ALG_AS_SHIFT)
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 34c4b9f7fbfa..6d85fcc5bd0a 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -37,13 +37,16 @@ struct caam_jrentry_info {
/* Private sub-storage for a single JobR */
struct caam_drv_private_jr {
- struct device *parentdev; /* points back to controller dev */
- struct platform_device *jr_pdev;/* points to platform device for JR */
+ struct list_head list_node; /* Job Ring device list */
+ struct device *dev;
int ridx;
struct caam_job_ring __iomem *rregs; /* JobR's register space */
struct tasklet_struct irqtask;
int irq; /* One per queue */
+ /* Number of scatterlist crypt transforms active on the JobR */
+ atomic_t tfm_count ____cacheline_aligned;
+
/* Job ring info */
int ringsize; /* Size of rings (assume input = output) */
struct caam_jrentry_info *entinfo; /* Alloc'ed 1 per ring entry */
@@ -63,7 +66,7 @@ struct caam_drv_private_jr {
struct caam_drv_private {
struct device *dev;
- struct device **jrdev; /* Alloc'ed array per sub-device */
+ struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
struct platform_device *pdev;
/* Physical-presence section */
@@ -80,12 +83,11 @@ struct caam_drv_private {
u8 qi_present; /* Nonzero if QI present in device */
int secvio_irq; /* Security violation interrupt number */
- /* which jr allocated to scatterlist crypto */
- atomic_t tfm_count ____cacheline_aligned;
- /* list of registered crypto algorithms (mk generic context handle?) */
- struct list_head alg_list;
- /* list of registered hash algorithms (mk generic context handle?) */
- struct list_head hash_list;
+#define RNG4_MAX_HANDLES 2
+ /* RNG4 block */
+ u32 rng4_sh_init; /* This bitmap shows which of the State
+ Handles of the RNG4 block are initialized
+ by this driver */
/*
* debugfs entries for developer view into driver/device
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index bdb786d5a5e5..1d80bd3636c5 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -6,6 +6,7 @@
*/
#include <linux/of_irq.h>
+#include <linux/of_address.h>
#include "compat.h"
#include "regs.h"
@@ -13,6 +14,113 @@
#include "desc.h"
#include "intern.h"
+struct jr_driver_data {
+ /* List of physical JobRs registered with the driver */
+ struct list_head jr_list;
+ spinlock_t jr_alloc_lock; /* jr_list lock */
+} ____cacheline_aligned;
+
+static struct jr_driver_data driver_data;
+
+static int caam_reset_hw_jr(struct device *dev)
+{
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+ unsigned int timeout = 100000;
+
+ /*
+ * mask interrupts since we are going to poll
+ * for reset completion status
+ */
+ setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+
+ /* initiate flush (required prior to reset) */
+ wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
+ while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
+ JRINT_ERR_HALT_INPROGRESS) && --timeout)
+ cpu_relax();
+
+ if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
+ JRINT_ERR_HALT_COMPLETE || timeout == 0) {
+ dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
+ return -EIO;
+ }
+
+ /* initiate reset */
+ timeout = 100000;
+ wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
+ while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
+ cpu_relax();
+
+ if (timeout == 0) {
+ dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
+ return -EIO;
+ }
+
+ /* unmask interrupts */
+ clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+
+ return 0;
+}
+
+/*
+ * Shutdown JobR independent of platform property code
+ */
+int caam_jr_shutdown(struct device *dev)
+{
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+ dma_addr_t inpbusaddr, outbusaddr;
+ int ret;
+
+ ret = caam_reset_hw_jr(dev);
+
+ tasklet_kill(&jrp->irqtask);
+
+ /* Release interrupt */
+ free_irq(jrp->irq, dev);
+
+ /* Free rings */
+ inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
+ outbusaddr = rd_reg64(&jrp->rregs->outring_base);
+ dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
+ jrp->inpring, inpbusaddr);
+ dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
+ jrp->outring, outbusaddr);
+ kfree(jrp->entinfo);
+
+ return ret;
+}
+
+static int caam_jr_remove(struct platform_device *pdev)
+{
+ int ret;
+ struct device *jrdev;
+ struct caam_drv_private_jr *jrpriv;
+
+ jrdev = &pdev->dev;
+ jrpriv = dev_get_drvdata(jrdev);
+
+ /*
+ * Return -EBUSY if the job ring is still allocated to a tfm.
+ */
+ if (atomic_read(&jrpriv->tfm_count)) {
+ dev_err(jrdev, "Device is busy\n");
+ return -EBUSY;
+ }
+
+ /* Remove the node from Physical JobR list maintained by driver */
+ spin_lock(&driver_data.jr_alloc_lock);
+ list_del(&jrpriv->list_node);
+ spin_unlock(&driver_data.jr_alloc_lock);
+
+ /* Release ring */
+ ret = caam_jr_shutdown(jrdev);
+ if (ret)
+ dev_err(jrdev, "Failed to shut down job ring\n");
+ irq_dispose_mapping(jrpriv->irq);
+
+ return ret;
+}
+
/* Main per-ring interrupt handler */
static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
{
@@ -128,6 +236,59 @@ static void caam_jr_dequeue(unsigned long devarg)
}
/**
+ * caam_jr_alloc() - Alloc a job ring for someone to use as needed.
+ *
+ * returns : pointer to the newly allocated physical
+ * JobR dev on success, ERR_PTR(-ENODEV) otherwise.
+ **/
+struct device *caam_jr_alloc(void)
+{
+ struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
+ struct device *dev = NULL;
+ int min_tfm_cnt = INT_MAX;
+ int tfm_cnt;
+
+ spin_lock(&driver_data.jr_alloc_lock);
+
+ if (list_empty(&driver_data.jr_list)) {
+ spin_unlock(&driver_data.jr_alloc_lock);
+ return ERR_PTR(-ENODEV);
+ }
+
+ list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
+ tfm_cnt = atomic_read(&jrpriv->tfm_count);
+ if (tfm_cnt < min_tfm_cnt) {
+ min_tfm_cnt = tfm_cnt;
+ min_jrpriv = jrpriv;
+ }
+ if (!min_tfm_cnt)
+ break;
+ }
+
+ if (min_jrpriv) {
+ atomic_inc(&min_jrpriv->tfm_count);
+ dev = min_jrpriv->dev;
+ }
+ spin_unlock(&driver_data.jr_alloc_lock);
+
+ return dev;
+}
+EXPORT_SYMBOL(caam_jr_alloc);
+
+/**
+ * caam_jr_free() - Free the Job Ring
+ * @rdev - points to the dev that identifies the Job ring to
+ * be released.
+ **/
+void caam_jr_free(struct device *rdev)
+{
+ struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);
+
+ atomic_dec(&jrpriv->tfm_count);
+}
+EXPORT_SYMBOL(caam_jr_free);
+
+/**
* caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
* -EBUSY if the queue is full, -EIO if it cannot map the caller's
* descriptor.
@@ -207,46 +368,6 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
}
EXPORT_SYMBOL(caam_jr_enqueue);
-static int caam_reset_hw_jr(struct device *dev)
-{
- struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
- unsigned int timeout = 100000;
-
- /*
- * mask interrupts since we are going to poll
- * for reset completion status
- */
- setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
-
- /* initiate flush (required prior to reset) */
- wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
- while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
- JRINT_ERR_HALT_INPROGRESS) && --timeout)
- cpu_relax();
-
- if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
- JRINT_ERR_HALT_COMPLETE || timeout == 0) {
- dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
- return -EIO;
- }
-
- /* initiate reset */
- timeout = 100000;
- wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
- while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
- cpu_relax();
-
- if (timeout == 0) {
- dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
- return -EIO;
- }
-
- /* unmask interrupts */
- clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
-
- return 0;
-}
-
/*
* Init JobR independent of platform property detection
*/
@@ -262,7 +383,7 @@ static int caam_jr_init(struct device *dev)
/* Connect job ring interrupt handler. */
error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
- "caam-jobr", dev);
+ dev_name(dev), dev);
if (error) {
dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
jrp->ridx, jrp->irq);
@@ -318,86 +439,43 @@ static int caam_jr_init(struct device *dev)
return 0;
}
-/*
- * Shutdown JobR independent of platform property code
- */
-int caam_jr_shutdown(struct device *dev)
-{
- struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
- dma_addr_t inpbusaddr, outbusaddr;
- int ret;
-
- ret = caam_reset_hw_jr(dev);
-
- tasklet_kill(&jrp->irqtask);
-
- /* Release interrupt */
- free_irq(jrp->irq, dev);
-
- /* Free rings */
- inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
- outbusaddr = rd_reg64(&jrp->rregs->outring_base);
- dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
- jrp->inpring, inpbusaddr);
- dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
- jrp->outring, outbusaddr);
- kfree(jrp->entinfo);
- of_device_unregister(jrp->jr_pdev);
-
- return ret;
-}
/*
- * Probe routine for each detected JobR subsystem. It assumes that
- * property detection was picked up externally.
+ * Probe routine for each detected JobR subsystem.
*/
-int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
- int ring)
+static int caam_jr_probe(struct platform_device *pdev)
{
- struct device *ctrldev, *jrdev;
- struct platform_device *jr_pdev;
- struct caam_drv_private *ctrlpriv;
+ struct device *jrdev;
+ struct device_node *nprop;
+ struct caam_job_ring __iomem *ctrl;
struct caam_drv_private_jr *jrpriv;
- u32 *jroffset;
+ static int total_jobrs;
int error;
- ctrldev = &pdev->dev;
- ctrlpriv = dev_get_drvdata(ctrldev);
-
+ jrdev = &pdev->dev;
jrpriv = kmalloc(sizeof(struct caam_drv_private_jr),
GFP_KERNEL);
- if (jrpriv == NULL) {
- dev_err(ctrldev, "can't alloc private mem for job ring %d\n",
- ring);
+ if (!jrpriv)
return -ENOMEM;
- }
- jrpriv->parentdev = ctrldev; /* point back to parent */
- jrpriv->ridx = ring; /* save ring identity relative to detection */
- /*
- * Derive a pointer to the detected JobRs regs
- * Driver has already iomapped the entire space, we just
- * need to add in the offset to this JobR. Don't know if I
- * like this long-term, but it'll run
- */
- jroffset = (u32 *)of_get_property(np, "reg", NULL);
- jrpriv->rregs = (struct caam_job_ring __iomem *)((void *)ctrlpriv->ctrl
- + *jroffset);
+ dev_set_drvdata(jrdev, jrpriv);
- /* Build a local dev for each detected queue */
- jr_pdev = of_platform_device_create(np, NULL, ctrldev);
- if (jr_pdev == NULL) {
- kfree(jrpriv);
- return -EINVAL;
+ /* save ring identity relative to detection */
+ jrpriv->ridx = total_jobrs++;
+
+ nprop = pdev->dev.of_node;
+ /* Get configuration properties from device tree */
+ /* First, get register page */
+ ctrl = of_iomap(nprop, 0);
+ if (!ctrl) {
+ dev_err(jrdev, "of_iomap() failed\n");
+ return -ENOMEM;
}
- jrpriv->jr_pdev = jr_pdev;
- jrdev = &jr_pdev->dev;
- dev_set_drvdata(jrdev, jrpriv);
- ctrlpriv->jrdev[ring] = jrdev;
+ jrpriv->rregs = (struct caam_job_ring __force *)ctrl;
if (sizeof(dma_addr_t) == sizeof(u64))
- if (of_device_is_compatible(np, "fsl,sec-v5.0-job-ring"))
+ if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
dma_set_mask(jrdev, DMA_BIT_MASK(40));
else
dma_set_mask(jrdev, DMA_BIT_MASK(36));
@@ -405,15 +483,61 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
dma_set_mask(jrdev, DMA_BIT_MASK(32));
/* Identify the interrupt */
- jrpriv->irq = irq_of_parse_and_map(np, 0);
+ jrpriv->irq = irq_of_parse_and_map(nprop, 0);
/* Now do the platform independent part */
error = caam_jr_init(jrdev); /* now turn on hardware */
if (error) {
- of_device_unregister(jr_pdev);
kfree(jrpriv);
return error;
}
- return error;
+ jrpriv->dev = jrdev;
+ spin_lock(&driver_data.jr_alloc_lock);
+ list_add_tail(&jrpriv->list_node, &driver_data.jr_list);
+ spin_unlock(&driver_data.jr_alloc_lock);
+
+ atomic_set(&jrpriv->tfm_count, 0);
+
+ return 0;
+}
+
+static struct of_device_id caam_jr_match[] = {
+ {
+ .compatible = "fsl,sec-v4.0-job-ring",
+ },
+ {
+ .compatible = "fsl,sec4.0-job-ring",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, caam_jr_match);
+
+static struct platform_driver caam_jr_driver = {
+ .driver = {
+ .name = "caam_jr",
+ .owner = THIS_MODULE,
+ .of_match_table = caam_jr_match,
+ },
+ .probe = caam_jr_probe,
+ .remove = caam_jr_remove,
+};
+
+static int __init jr_driver_init(void)
+{
+ spin_lock_init(&driver_data.jr_alloc_lock);
+ INIT_LIST_HEAD(&driver_data.jr_list);
+ return platform_driver_register(&caam_jr_driver);
+}
+
+static void __exit jr_driver_exit(void)
+{
+ platform_driver_unregister(&caam_jr_driver);
}
+
+module_init(jr_driver_init);
+module_exit(jr_driver_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM JR request backend");
+MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
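
A hedged sketch of how a consumer drives a leased ring through caam_jr_enqueue(); the completion-based result plumbing (example_* names) is an assumption for illustration, not part of this patch:

struct example_result {
	struct completion done;
	u32 status;
};

static void example_done(struct device *jrdev, u32 *desc, u32 status,
			 void *areq)
{
	struct example_result *res = areq;

	res->status = status;	/* 0 on success, CAAM error code otherwise */
	complete(&res->done);
}

static int example_submit(struct device *jrdev, u32 *desc)
{
	struct example_result res;
	int ret;

	init_completion(&res.done);
	ret = caam_jr_enqueue(jrdev, desc, example_done, &res);
	if (ret)
		return ret;	/* -EBUSY when the ring is full */

	wait_for_completion(&res.done);
	return res.status ? -EIO : 0;
}
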
diff --git a/drivers/crypto/caam/jr.h b/drivers/crypto/caam/jr.h
index 9d8741a59037..97113a6d6c58 100644
--- a/drivers/crypto/caam/jr.h
+++ b/drivers/crypto/caam/jr.h
@@ -8,12 +8,11 @@
#define JR_H
/* Prototypes for backend-level services exposed to APIs */
+struct device *caam_jr_alloc(void);
+void caam_jr_free(struct device *rdev);
int caam_jr_enqueue(struct device *dev, u32 *desc,
void (*cbk)(struct device *dev, u32 *desc, u32 status,
void *areq),
void *areq);
-extern int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
- int ring);
-extern int caam_jr_shutdown(struct device *dev);
#endif /* JR_H */
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 4455396918de..d50174f45b21 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -245,7 +245,7 @@ struct rngtst {
/* RNG4 TRNG test registers */
struct rng4tst {
-#define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */
+#define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */
u32 rtmctl; /* misc. control register */
u32 rtscmisc; /* statistical check misc. register */
u32 rtpkrrng; /* poker range register */
@@ -255,6 +255,8 @@ struct rng4tst {
};
#define RTSDCTL_ENT_DLY_SHIFT 16
#define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT)
+#define RTSDCTL_ENT_DLY_MIN 1200
+#define RTSDCTL_ENT_DLY_MAX 12800
u32 rtsdctl; /* seed control register */
union {
u32 rtsblim; /* PRGM=1: sparse bit limit register */
@@ -266,7 +268,11 @@ struct rng4tst {
u32 rtfrqcnt; /* PRGM=0: freq. count register */
};
u32 rsvd1[40];
+#define RDSTA_SKVT 0x80000000
+#define RDSTA_SKVN 0x40000000
#define RDSTA_IF0 0x00000001
+#define RDSTA_IF1 0x00000002
+#define RDSTA_IFMASK (RDSTA_IF1 | RDSTA_IF0)
u32 rdsta;
u32 rsvd2[15];
};
@@ -692,6 +698,7 @@ struct caam_deco {
u32 jr_ctl_hi; /* CxJRR - JobR Control Register @800 */
u32 jr_ctl_lo;
u64 jr_descaddr; /* CxDADR - JobR Descriptor Address */
+#define DECO_OP_STATUS_HI_ERR_MASK 0xF00000FF
u32 op_status_hi; /* DxOPSTA - DECO Operation Status */
u32 op_status_lo;
u32 rsvd24[2];
@@ -706,12 +713,13 @@ struct caam_deco {
u32 rsvd29[48];
u32 descbuf[64]; /* DxDESB - Descriptor buffer */
u32 rscvd30[193];
+#define DESC_DBG_DECO_STAT_HOST_ERR 0x00D00000
+#define DESC_DBG_DECO_STAT_VALID 0x80000000
+#define DESC_DBG_DECO_STAT_MASK 0x00F00000
u32 desc_dbg; /* DxDDR - DECO Debug Register */
u32 rsvd31[126];
};
-/* DECO DBG Register Valid Bit*/
-#define DECO_DBG_VALID 0x80000000
#define DECO_JQCR_WHL 0x20000000
#define DECO_JQCR_FOUR 0x10000000
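
A short sketch of how ctrl.c (above) consumes the new RDSTA bits: record which state handles an earlier boot stage (e.g. u-boot) already instantiated, and keep only the complement as driver-owned so caam_remove() deinstantiates exactly what this driver created (helper name is hypothetical):

static u32 example_driver_owned_handles(struct caam_full __iomem *topregs)
{
	/* IF bits set here were instantiated before us, e.g. by u-boot */
	u32 pre_inited = rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IFMASK;

	/* the complement is what this driver instantiated and must
	 * deinstantiate again in caam_remove() */
	return ~pre_inited & RDSTA_IFMASK;
}
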
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index e0037c8ee243..b12ff85f4241 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -117,6 +117,21 @@ static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
return nents;
}
+/* Map SG page in kernel virtual address space and copy */
+static inline void sg_map_copy(u8 *dest, struct scatterlist *sg,
+ int len, int offset)
+{
+ u8 *mapped_addr;
+
+ /*
+ * The page here may be a user-space page pinned via get_user_pages();
+ * it must be kmapped before use and kunmapped afterwards.
+ */
+ mapped_addr = kmap_atomic(sg_page(sg));
+ memcpy(dest, mapped_addr + offset, len);
+ kunmap_atomic(mapped_addr);
+}
+
/* Copy from len bytes of sg to dest, starting from beginning */
static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len)
{
@@ -124,15 +139,15 @@ static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len)
int cpy_index = 0, next_cpy_index = current_sg->length;
while (next_cpy_index < len) {
- memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg),
- current_sg->length);
+ sg_map_copy(dest + cpy_index, current_sg, current_sg->length,
+ current_sg->offset);
current_sg = scatterwalk_sg_next(current_sg);
cpy_index = next_cpy_index;
next_cpy_index += current_sg->length;
}
if (cpy_index < len)
- memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg),
- len - cpy_index);
+ sg_map_copy(dest + cpy_index, current_sg, len-cpy_index,
+ current_sg->offset);
}
/* Copy sg data, from to_skip to end, to dest */
@@ -140,7 +155,7 @@ static inline void sg_copy_part(u8 *dest, struct scatterlist *sg,
int to_skip, unsigned int end)
{
struct scatterlist *current_sg = sg;
- int sg_index, cpy_index;
+ int sg_index, cpy_index, offset;
sg_index = current_sg->length;
while (sg_index <= to_skip) {
@@ -148,9 +163,10 @@ static inline void sg_copy_part(u8 *dest, struct scatterlist *sg,
sg_index += current_sg->length;
}
cpy_index = sg_index - to_skip;
- memcpy(dest, (u8 *) sg_virt(current_sg) +
- current_sg->length - cpy_index, cpy_index);
- current_sg = scatterwalk_sg_next(current_sg);
- if (end - sg_index)
+ offset = current_sg->offset + current_sg->length - cpy_index;
+ sg_map_copy(dest, current_sg, cpy_index, offset);
+ if (end - sg_index) {
+ current_sg = scatterwalk_sg_next(current_sg);
sg_copy(dest + cpy_index, current_sg, end - sg_index);
+ }
}
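
The sg_virt()-to-sg_map_copy() switch matters because the scatterlist may reference user pages pinned via get_user_pages(); on highmem configurations those pages have no permanent kernel mapping, so sg_virt() would yield a bogus address. A small usage sketch (example_flatten and the buffer size are illustrative):

static void example_flatten(struct scatterlist *sg, unsigned int nbytes)
{
	u8 linear[64];

	/* each chunk is kmap_atomic()'d before the memcpy(), so this is
	 * safe even when sg points at pinned highmem user pages */
	sg_copy(linear, sg, min_t(unsigned int, sizeof(linear), nbytes));
}
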
diff --git a/drivers/crypto/dcp.c b/drivers/crypto/dcp.c
index a8a7dd4b0d25..247ab8048f5b 100644
--- a/drivers/crypto/dcp.c
+++ b/drivers/crypto/dcp.c
@@ -733,12 +733,9 @@ static int dcp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "failed to get IORESOURCE_MEM\n");
- return -ENXIO;
- }
- dev->dcp_regs_base = devm_ioremap(&pdev->dev, r->start,
- resource_size(r));
+ dev->dcp_regs_base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(dev->dcp_regs_base))
+ return PTR_ERR(dev->dcp_regs_base);
dcp_set(dev, DCP_CTRL_SFRST, DCP_REG_CTRL);
udelay(10);
@@ -762,7 +759,8 @@ static int dcp_probe(struct platform_device *pdev)
return -EIO;
}
dev->dcp_vmi_irq = r->start;
- ret = request_irq(dev->dcp_vmi_irq, dcp_vmi_irq, 0, "dcp", dev);
+ ret = devm_request_irq(&pdev->dev, dev->dcp_vmi_irq, dcp_vmi_irq, 0,
+ "dcp", dev);
if (ret != 0) {
dev_err(&pdev->dev, "can't request_irq (0)\n");
return -EIO;
@@ -771,15 +769,14 @@ static int dcp_probe(struct platform_device *pdev)
r = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
if (!r) {
dev_err(&pdev->dev, "can't get IRQ resource (1)\n");
- ret = -EIO;
- goto err_free_irq0;
+ return -EIO;
}
dev->dcp_irq = r->start;
- ret = request_irq(dev->dcp_irq, dcp_irq, 0, "dcp", dev);
+ ret = devm_request_irq(&pdev->dev, dev->dcp_irq, dcp_irq, 0, "dcp",
+ dev);
if (ret != 0) {
dev_err(&pdev->dev, "can't request_irq (1)\n");
- ret = -EIO;
- goto err_free_irq0;
+ return -EIO;
}
dev->hw_pkg[0] = dma_alloc_coherent(&pdev->dev,
@@ -788,8 +785,7 @@ static int dcp_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!dev->hw_pkg[0]) {
dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
- ret = -ENOMEM;
- goto err_free_irq1;
+ return -ENOMEM;
}
for (i = 1; i < DCP_MAX_PKG; i++) {
@@ -848,16 +844,14 @@ err_unregister:
for (j = 0; j < i; j++)
crypto_unregister_alg(&algs[j]);
err_free_key_iv:
+ tasklet_kill(&dev->done_task);
+ tasklet_kill(&dev->queue_task);
dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
dev->payload_base_dma);
err_free_hw_packet:
dma_free_coherent(&pdev->dev, DCP_MAX_PKG *
sizeof(struct dcp_hw_packet), dev->hw_pkg[0],
dev->hw_phys_pkg);
-err_free_irq1:
- free_irq(dev->dcp_irq, dev);
-err_free_irq0:
- free_irq(dev->dcp_vmi_irq, dev);
return ret;
}
@@ -868,23 +862,20 @@ static int dcp_remove(struct platform_device *pdev)
int j;
dev = platform_get_drvdata(pdev);
- dma_free_coherent(&pdev->dev,
- DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
- dev->hw_pkg[0], dev->hw_phys_pkg);
-
- dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
- dev->payload_base_dma);
+ misc_deregister(&dev->dcp_bootstream_misc);
- free_irq(dev->dcp_irq, dev);
- free_irq(dev->dcp_vmi_irq, dev);
+ for (j = 0; j < ARRAY_SIZE(algs); j++)
+ crypto_unregister_alg(&algs[j]);
tasklet_kill(&dev->done_task);
tasklet_kill(&dev->queue_task);
- for (j = 0; j < ARRAY_SIZE(algs); j++)
- crypto_unregister_alg(&algs[j]);
+ dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
+ dev->payload_base_dma);
- misc_deregister(&dev->dcp_bootstream_misc);
+ dma_free_coherent(&pdev->dev,
+ DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
+ dev->hw_pkg[0], dev->hw_phys_pkg);
return 0;
}
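
Note on the dcp.c conversion: devm_ioremap_resource() validates the resource
pointer itself (returning an ERR_PTR() on a NULL or non-MEM resource) and
devm_request_irq() releases the line automatically on driver detach, which
is why the err_free_irq0/err_free_irq1 unwind labels disappear; remove()
now also tears things down in the reverse order of probe(). A condensed
sketch of the managed-resource pattern, with hypothetical foo_* names:

	static int foo_probe(struct platform_device *pdev)
	{
		struct resource *r;
		void __iomem *base;
		int ret;

		r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		base = devm_ioremap_resource(&pdev->dev, r); /* checks r */
		if (IS_ERR(base))
			return PTR_ERR(base);

		r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
		if (!r)
			return -EIO;

		/* Freed automatically when the device is unbound. */
		ret = devm_request_irq(&pdev->dev, r->start, foo_irq, 0,
				       "foo", NULL);
		if (ret)
			return ret;

		return 0;
	}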
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 214357e12dc0..f757a0f428bd 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -1149,32 +1149,24 @@ static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
- struct rtattr *rta = (struct rtattr *)key;
- struct crypto_authenc_key_param *param;
+ struct crypto_authenc_keys keys;
- if (!RTA_OK(rta, keylen))
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
- goto badkey;
- if (RTA_PAYLOAD(rta) < sizeof(*param))
- goto badkey;
-
- param = RTA_DATA(rta);
- ctx->enckey_len = be32_to_cpu(param->enckeylen);
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
+ if (keys.authkeylen > sizeof(ctx->authkey))
+ goto badkey;
- if (keylen < ctx->enckey_len)
+ if (keys.enckeylen > sizeof(ctx->enckey))
goto badkey;
- ctx->authkey_len = keylen - ctx->enckey_len;
- memcpy(ctx->enckey, key + ctx->authkey_len, ctx->enckey_len);
- memcpy(ctx->authkey, key, ctx->authkey_len);
+ memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
+ memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
+ ctx->authkey_len = keys.authkeylen;
+ ctx->enckey_len = keys.enckeylen;
return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
- ctx->enckey_len = 0;
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -1418,14 +1410,12 @@ static const struct platform_device_info ixp_dev_info __initdata = {
static int __init ixp_module_init(void)
{
int num = ARRAY_SIZE(ixp4xx_algos);
- int i, err ;
+ int i, err;
pdev = platform_device_register_full(&ixp_dev_info);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
- dev = &pdev->dev;
-
spin_lock_init(&desc_lock);
spin_lock_init(&emerg_lock);
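
Note on the aead_setkey() rewrite (the picoxcell and talitos hunks below
follow the same pattern): the open-coded rtattr parsing of the authenc()
key blob is replaced by crypto_authenc_extractkeys() from
<crypto/authenc.h>, which validates the layout and hands back the split
keys, leaving only driver-specific bounds checks. A sketch of the resulting
shape, with a hypothetical foo_ctx holding fixed-size key buffers:

	#include <crypto/authenc.h>

	static int foo_aead_setkey(struct crypto_aead *tfm, const u8 *key,
				   unsigned int keylen)
	{
		struct foo_ctx *ctx = crypto_aead_ctx(tfm);
		struct crypto_authenc_keys keys;

		if (crypto_authenc_extractkeys(&keys, key, keylen))
			goto badkey;

		if (keys.authkeylen > sizeof(ctx->authkey) ||
		    keys.enckeylen > sizeof(ctx->enckey))
			goto badkey;

		memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
		memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
		ctx->authkey_len = keys.authkeylen;
		ctx->enckey_len = keys.enckeylen;
		return 0;

	badkey:
		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}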
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 3374a3ebe4c7..8d1e6f8e9e9c 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -907,7 +907,7 @@ static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
}
-irqreturn_t crypto_int(int irq, void *priv)
+static irqreturn_t crypto_int(int irq, void *priv)
{
u32 val;
@@ -928,7 +928,7 @@ irqreturn_t crypto_int(int irq, void *priv)
return IRQ_HANDLED;
}
-struct crypto_alg mv_aes_alg_ecb = {
+static struct crypto_alg mv_aes_alg_ecb = {
.cra_name = "ecb(aes)",
.cra_driver_name = "mv-ecb-aes",
.cra_priority = 300,
@@ -951,7 +951,7 @@ struct crypto_alg mv_aes_alg_ecb = {
},
};
-struct crypto_alg mv_aes_alg_cbc = {
+static struct crypto_alg mv_aes_alg_cbc = {
.cra_name = "cbc(aes)",
.cra_driver_name = "mv-cbc-aes",
.cra_priority = 300,
@@ -975,7 +975,7 @@ struct crypto_alg mv_aes_alg_cbc = {
},
};
-struct ahash_alg mv_sha1_alg = {
+static struct ahash_alg mv_sha1_alg = {
.init = mv_hash_init,
.update = mv_hash_update,
.final = mv_hash_final,
@@ -999,7 +999,7 @@ struct ahash_alg mv_sha1_alg = {
}
};
-struct ahash_alg mv_hmac_sha1_alg = {
+static struct ahash_alg mv_hmac_sha1_alg = {
.init = mv_hash_init,
.update = mv_hash_update,
.final = mv_hash_final,
@@ -1084,7 +1084,7 @@ static int mv_probe(struct platform_device *pdev)
goto err_unmap_sram;
}
- ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
+ ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev),
cp);
if (ret)
goto err_thread;
@@ -1187,7 +1187,7 @@ static struct platform_driver marvell_crypto = {
.driver = {
.owner = THIS_MODULE,
.name = "mv_crypto",
- .of_match_table = of_match_ptr(mv_cesa_of_match_table),
+ .of_match_table = mv_cesa_of_match_table,
},
};
MODULE_ALIAS("platform:mv_crypto");
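
Note on the mv_cesa.c cleanups: symbols used only within the file gain
static, request_irq() drops IRQF_DISABLED (a no-op, since genirq always
runs handlers with interrupts disabled), and the of_match_table assignment
drops of_match_ptr(), which expands to NULL without CONFIG_OF and would
leave the table defined but unused. The resulting registration shape, with
hypothetical names:

	static const struct of_device_id foo_of_match_table[] = {
		{ .compatible = "vendor,foo-crypto", },	/* hypothetical */
		{ }
	};
	MODULE_DEVICE_TABLE(of, foo_of_match_table);

	static struct platform_driver foo_driver = {
		.driver	= {
			.name		= "foo",
			.owner		= THIS_MODULE,
			.of_match_table	= foo_of_match_table,
		},
	};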
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index ce791c2f81f7..a9ccbf14096e 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -275,7 +275,7 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
if (dd->flags & FLAGS_CBC)
val |= AES_REG_CTRL_CBC;
if (dd->flags & FLAGS_CTR) {
- val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_32;
+ val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;
mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK;
}
if (dd->flags & FLAGS_ENCRYPT)
@@ -554,7 +554,7 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
return err;
}
-int omap_aes_check_aligned(struct scatterlist *sg)
+static int omap_aes_check_aligned(struct scatterlist *sg)
{
while (sg) {
if (!IS_ALIGNED(sg->offset, 4))
@@ -566,7 +566,7 @@ int omap_aes_check_aligned(struct scatterlist *sg)
return 0;
}
-int omap_aes_copy_sgs(struct omap_aes_dev *dd)
+static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
{
void *buf_in, *buf_out;
int pages;
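
Note on the omap-aes.c CTR fix: AES_REG_CTRL_CTR_WIDTH_32 made the hardware
increment only the low 32 bits of the counter block, while the kernel's
ctr(aes) convention (and the generic implementation it is tested against)
increments the full 128-bit block with carry; selecting
AES_REG_CTRL_CTR_WIDTH_128 aligns the two. The software behaviour, as in
crypto_inc(), amounts to:

	/* Increment a 16-byte big-endian counter block with carry. */
	static void ctr128_inc(u8 counter[16])
	{
		int i;

		for (i = 15; i >= 0; i--)
			if (++counter[i] != 0)	/* stop once no carry out */
				break;
	}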
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index e28104b4aab0..e45aaaf0db30 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -2033,3 +2033,4 @@ module_platform_driver(omap_sham_driver);
MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");
+MODULE_ALIAS("platform:omap-sham");
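
Note on the omap-sham.c addition: platform devices emit a
"MODALIAS=platform:<device name>" uevent, so a module only autoloads if it
declares a matching alias; without one, omap-sham had to be loaded by hand
when built as a module. The general pattern:

	/* Match the MODALIAS uevent for the "omap-sham" platform device. */
	MODULE_ALIAS("platform:omap-sham");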
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 888f7f4a6d3f..a6175ba6d238 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -495,45 +495,29 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
{
struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
- struct rtattr *rta = (void *)key;
- struct crypto_authenc_key_param *param;
- unsigned int authkeylen, enckeylen;
+ struct crypto_authenc_keys keys;
int err = -EINVAL;
- if (!RTA_OK(rta, keylen))
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ if (keys.enckeylen > AES_MAX_KEY_SIZE)
goto badkey;
- if (RTA_PAYLOAD(rta) < sizeof(*param))
- goto badkey;
-
- param = RTA_DATA(rta);
- enckeylen = be32_to_cpu(param->enckeylen);
-
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
-
- if (keylen < enckeylen)
- goto badkey;
-
- authkeylen = keylen - enckeylen;
-
- if (enckeylen > AES_MAX_KEY_SIZE)
+ if (keys.authkeylen > sizeof(ctx->hash_ctx))
goto badkey;
if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
SPA_CTRL_CIPH_ALG_AES)
- err = spacc_aead_aes_setkey(tfm, key + authkeylen, enckeylen);
+ err = spacc_aead_aes_setkey(tfm, keys.enckey, keys.enckeylen);
else
- err = spacc_aead_des_setkey(tfm, key + authkeylen, enckeylen);
+ err = spacc_aead_des_setkey(tfm, keys.enckey, keys.enckeylen);
if (err)
goto badkey;
- memcpy(ctx->hash_ctx, key, authkeylen);
- ctx->hash_key_len = authkeylen;
+ memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen);
+ ctx->hash_key_len = keys.authkeylen;
return 0;
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index d7bb8bac36e9..785a9ded7bdf 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1058,7 +1058,7 @@ static struct platform_driver sahara_driver = {
.driver = {
.name = SAHARA_NAME,
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(sahara_dt_ids),
+ .of_match_table = sahara_dt_ids,
},
.id_table = sahara_platform_ids,
};
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 6cd0e6038583..b44f4ddc565c 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -673,39 +673,20 @@ static int aead_setkey(struct crypto_aead *authenc,
const u8 *key, unsigned int keylen)
{
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
- struct rtattr *rta = (void *)key;
- struct crypto_authenc_key_param *param;
- unsigned int authkeylen;
- unsigned int enckeylen;
-
- if (!RTA_OK(rta, keylen))
- goto badkey;
+ struct crypto_authenc_keys keys;
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
- if (RTA_PAYLOAD(rta) < sizeof(*param))
+ if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
goto badkey;
- param = RTA_DATA(rta);
- enckeylen = be32_to_cpu(param->enckeylen);
-
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
- if (keylen < enckeylen)
- goto badkey;
-
- authkeylen = keylen - enckeylen;
-
- if (keylen > TALITOS_MAX_KEY_SIZE)
- goto badkey;
-
- memcpy(&ctx->key, key, keylen);
-
- ctx->keylen = keylen;
- ctx->enckeylen = enckeylen;
- ctx->authkeylen = authkeylen;
+ ctx->keylen = keys.authkeylen + keys.enckeylen;
+ ctx->enckeylen = keys.enckeylen;
+ ctx->authkeylen = keys.authkeylen;
return 0;
@@ -809,7 +790,7 @@ static void ipsec_esp_unmap(struct device *dev,
if (edesc->assoc_chained)
talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
- else
+ else if (areq->assoclen)
/* assoc_nents counts also for IV in non-contiguous cases */
dma_unmap_sg(dev, areq->assoc,
edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
@@ -992,7 +973,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len, DMA_BIDIRECTIONAL);
} else {
- to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->assoc));
+ if (areq->assoclen)
+ to_talitos_ptr(&desc->ptr[1],
+ sg_dma_address(areq->assoc));
+ else
+ to_talitos_ptr(&desc->ptr[1], edesc->iv_dma);
desc->ptr[1].j_extent = 0;
}
@@ -1127,7 +1112,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
unsigned int authsize,
unsigned int ivsize,
int icv_stashing,
- u32 cryptoflags)
+ u32 cryptoflags,
+ bool encrypt)
{
struct talitos_edesc *edesc;
int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
@@ -1141,10 +1127,10 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
return ERR_PTR(-EINVAL);
}
- if (iv)
+ if (ivsize)
iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
- if (assoc) {
+ if (assoclen) {
/*
* Currently it is assumed that iv is provided whenever assoc
* is.
@@ -1160,19 +1146,17 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
}
- src_nents = sg_count(src, cryptlen + authsize, &src_chained);
- src_nents = (src_nents == 1) ? 0 : src_nents;
-
- if (!dst) {
- dst_nents = 0;
- } else {
- if (dst == src) {
- dst_nents = src_nents;
- } else {
- dst_nents = sg_count(dst, cryptlen + authsize,
- &dst_chained);
- dst_nents = (dst_nents == 1) ? 0 : dst_nents;
- }
+ if (!dst || dst == src) {
+ src_nents = sg_count(src, cryptlen + authsize, &src_chained);
+ src_nents = (src_nents == 1) ? 0 : src_nents;
+ dst_nents = dst ? src_nents : 0;
+ } else { /* dst && dst != src */
+ src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
+ &src_chained);
+ src_nents = (src_nents == 1) ? 0 : src_nents;
+ dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
+ &dst_chained);
+ dst_nents = (dst_nents == 1) ? 0 : dst_nents;
}
/*
@@ -1192,9 +1176,16 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
edesc = kmalloc(alloc_len, GFP_DMA | flags);
if (!edesc) {
- talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
+ if (assoc_chained)
+ talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
+ else if (assoclen)
+ dma_unmap_sg(dev, assoc,
+ assoc_nents ? assoc_nents - 1 : 1,
+ DMA_TO_DEVICE);
+
if (iv_dma)
dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+
dev_err(dev, "could not allocate edescriptor\n");
return ERR_PTR(-ENOMEM);
}
@@ -1216,7 +1207,7 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
}
static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
- int icv_stashing)
+ int icv_stashing, bool encrypt)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
@@ -1225,7 +1216,7 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
iv, areq->assoclen, areq->cryptlen,
ctx->authsize, ivsize, icv_stashing,
- areq->base.flags);
+ areq->base.flags, encrypt);
}
static int aead_encrypt(struct aead_request *req)
@@ -1235,7 +1226,7 @@ static int aead_encrypt(struct aead_request *req)
struct talitos_edesc *edesc;
/* allocate extended descriptor */
- edesc = aead_edesc_alloc(req, req->iv, 0);
+ edesc = aead_edesc_alloc(req, req->iv, 0, true);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1258,7 +1249,7 @@ static int aead_decrypt(struct aead_request *req)
req->cryptlen -= authsize;
/* allocate extended descriptor */
- edesc = aead_edesc_alloc(req, req->iv, 1);
+ edesc = aead_edesc_alloc(req, req->iv, 1, false);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1304,7 +1295,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *req)
struct talitos_edesc *edesc;
/* allocate extended descriptor */
- edesc = aead_edesc_alloc(areq, req->giv, 0);
+ edesc = aead_edesc_alloc(areq, req->giv, 0, true);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1460,7 +1451,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
}
static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
- areq)
+ areq, bool encrypt)
{
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
@@ -1468,7 +1459,7 @@ static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
areq->info, 0, areq->nbytes, 0, ivsize, 0,
- areq->base.flags);
+ areq->base.flags, encrypt);
}
static int ablkcipher_encrypt(struct ablkcipher_request *areq)
@@ -1478,7 +1469,7 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq)
struct talitos_edesc *edesc;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(areq);
+ edesc = ablkcipher_edesc_alloc(areq, true);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1495,7 +1486,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
struct talitos_edesc *edesc;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(areq);
+ edesc = ablkcipher_edesc_alloc(areq, false);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1647,7 +1638,7 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
- nbytes, 0, 0, 0, areq->base.flags);
+ nbytes, 0, 0, 0, areq->base.flags, false);
}
static int ahash_init(struct ahash_request *areq)
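
Note on the talitos_edesc_alloc() changes: the new encrypt flag lets the
scatterlist entry counts account for the ICV on the correct side when src
and dst differ -- on encrypt the authsize bytes are appended to dst, on
decrypt they are consumed from src. Restated from the hunk above, the
bookkeeping reduces to:

	/* Count entries against the length each side actually carries. */
	src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
			     &src_chained);
	dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
			     &dst_chained);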
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c
index fa05e3c329bd..060eecc5dbc3 100644
--- a/drivers/crypto/tegra-aes.c
+++ b/drivers/crypto/tegra-aes.c
@@ -27,6 +27,8 @@
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
@@ -199,8 +201,6 @@ static void aes_workqueue_handler(struct work_struct *work);
static DECLARE_WORK(aes_work, aes_workqueue_handler);
static struct workqueue_struct *aes_wq;
-extern unsigned long long tegra_chip_uid(void);
-
static inline u32 aes_readl(struct tegra_aes_dev *dd, u32 offset)
{
return readl(dd->io_base + offset);
@@ -713,13 +713,12 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
struct tegra_aes_dev *dd = aes_dev;
struct tegra_aes_ctx *ctx = &rng_ctx;
struct tegra_aes_slot *key_slot;
- struct timespec ts;
int ret = 0;
- u64 nsec, tmp[2];
+ u8 tmp[16]; /* 16 bytes = 128 bits of entropy */
u8 *dt;
if (!ctx || !dd) {
- dev_err(dd->dev, "ctx=0x%x, dd=0x%x\n",
+ pr_err("ctx=0x%x, dd=0x%x\n",
(unsigned int)ctx, (unsigned int)dd);
return -EINVAL;
}
@@ -778,14 +777,8 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
if (dd->ivlen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
dt = dd->iv + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128;
} else {
- getnstimeofday(&ts);
- nsec = timespec_to_ns(&ts);
- do_div(nsec, 1000);
- nsec ^= dd->ctr << 56;
- dd->ctr++;
- tmp[0] = nsec;
- tmp[1] = tegra_chip_uid();
- dt = (u8 *)tmp;
+ get_random_bytes(tmp, sizeof(tmp));
+ dt = tmp;
}
memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ);
@@ -804,7 +797,7 @@ static int tegra_aes_cra_init(struct crypto_tfm *tfm)
return 0;
}
-void tegra_aes_cra_exit(struct crypto_tfm *tfm)
+static void tegra_aes_cra_exit(struct crypto_tfm *tfm)
{
struct tegra_aes_ctx *ctx =
crypto_ablkcipher_ctx((struct crypto_ablkcipher *)tfm);
@@ -924,7 +917,7 @@ static int tegra_aes_probe(struct platform_device *pdev)
}
/* Initialize the vde clock */
- dd->aes_clk = clk_get(dev, "vde");
+ dd->aes_clk = devm_clk_get(dev, "vde");
if (IS_ERR(dd->aes_clk)) {
dev_err(dev, "iclock intialization failed.\n");
err = -ENODEV;
@@ -1033,8 +1026,6 @@ out:
if (dd->buf_out)
dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
dd->buf_out, dd->dma_buf_out);
- if (!IS_ERR(dd->aes_clk))
- clk_put(dd->aes_clk);
if (aes_wq)
destroy_workqueue(aes_wq);
spin_lock(&list_lock);
@@ -1068,7 +1059,6 @@ static int tegra_aes_remove(struct platform_device *pdev)
dd->buf_in, dd->dma_buf_in);
dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
dd->buf_out, dd->dma_buf_out);
- clk_put(dd->aes_clk);
aes_dev = NULL;
return 0;
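
Note on the tegra-aes.c RNG seeding: the fallback DT value for the
X9.31-style RNG was previously derived from a microsecond timestamp mixed
with tegra_chip_uid(), which is predictable; drawing it from the kernel
entropy pool removes both the guessable input and the extern dependency.
The replacement amounts to:

	#include <linux/random.h>

	u8 tmp[16];	/* one 128-bit AES block, per DEFAULT_RNG_BLK_SZ */

	get_random_bytes(tmp, sizeof(tmp));
	dt = tmp;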