Diffstat (limited to 'drivers/crypto/rockchip')
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto.c          | 505
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto.h          | 107
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_ahash.c    | 267
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_skcipher.c | 543
4 files changed, 765 insertions, 657 deletions
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
index 35d73061d156..9f6ba770a90a 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.c
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -14,235 +14,162 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/reset.h>
-static int rk_crypto_enable_clk(struct rk_crypto_info *dev)
-{
- int err;
-
- err = clk_prepare_enable(dev->sclk);
- if (err) {
- dev_err(dev->dev, "[%s:%d], Couldn't enable clock sclk\n",
- __func__, __LINE__);
- goto err_return;
- }
- err = clk_prepare_enable(dev->aclk);
- if (err) {
- dev_err(dev->dev, "[%s:%d], Couldn't enable clock aclk\n",
- __func__, __LINE__);
- goto err_aclk;
- }
- err = clk_prepare_enable(dev->hclk);
- if (err) {
- dev_err(dev->dev, "[%s:%d], Couldn't enable clock hclk\n",
- __func__, __LINE__);
- goto err_hclk;
- }
- err = clk_prepare_enable(dev->dmaclk);
- if (err) {
- dev_err(dev->dev, "[%s:%d], Couldn't enable clock dmaclk\n",
- __func__, __LINE__);
- goto err_dmaclk;
- }
- return err;
-err_dmaclk:
- clk_disable_unprepare(dev->hclk);
-err_hclk:
- clk_disable_unprepare(dev->aclk);
-err_aclk:
- clk_disable_unprepare(dev->sclk);
-err_return:
- return err;
-}
+static struct rockchip_ip rocklist = {
+ .dev_list = LIST_HEAD_INIT(rocklist.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(rocklist.lock),
+};
-static void rk_crypto_disable_clk(struct rk_crypto_info *dev)
+struct rk_crypto_info *get_rk_crypto(void)
{
- clk_disable_unprepare(dev->dmaclk);
- clk_disable_unprepare(dev->hclk);
- clk_disable_unprepare(dev->aclk);
- clk_disable_unprepare(dev->sclk);
+ struct rk_crypto_info *first;
+
+ spin_lock(&rocklist.lock);
+ first = list_first_entry_or_null(&rocklist.dev_list,
+ struct rk_crypto_info, list);
+ list_rotate_left(&rocklist.dev_list);
+ spin_unlock(&rocklist.lock);
+ return first;
}
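
The helper above load-balances requests across every probed instance (the rk3399, added below, has two independent crypto blocks): it returns the current head of the list, then rotates the list so the next caller gets the other device. A comment-level trace of the behaviour, assuming two instances A and B:

/*
 * dev_list = [A, B]
 *   get_rk_crypto() -> A, dev_list becomes [B, A]
 *   get_rk_crypto() -> B, dev_list becomes [A, B]
 *   get_rk_crypto() -> A, ...
 *
 * Requests therefore alternate between the two engines; with a single
 * instance the rotation is a no-op.
 */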
-static int check_alignment(struct scatterlist *sg_src,
- struct scatterlist *sg_dst,
- int align_mask)
-{
- int in, out, align;
+static const struct rk_variant rk3288_variant = {
+ .num_clks = 4,
+ .rkclks = {
+ { "sclk", 150000000},
+ }
+};
- in = IS_ALIGNED((uint32_t)sg_src->offset, 4) &&
- IS_ALIGNED((uint32_t)sg_src->length, align_mask);
- if (!sg_dst)
- return in;
- out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) &&
- IS_ALIGNED((uint32_t)sg_dst->length, align_mask);
- align = in && out;
+static const struct rk_variant rk3328_variant = {
+ .num_clks = 3,
+};
- return (align && (sg_src->length == sg_dst->length));
-}
+static const struct rk_variant rk3399_variant = {
+ .num_clks = 3,
+};
-static int rk_load_data(struct rk_crypto_info *dev,
- struct scatterlist *sg_src,
- struct scatterlist *sg_dst)
+static int rk_crypto_get_clks(struct rk_crypto_info *dev)
{
- unsigned int count;
-
- dev->aligned = dev->aligned ?
- check_alignment(sg_src, sg_dst, dev->align_size) :
- dev->aligned;
- if (dev->aligned) {
- count = min(dev->left_bytes, sg_src->length);
- dev->left_bytes -= count;
-
- if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) {
- dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n",
- __func__, __LINE__);
- return -EINVAL;
- }
- dev->addr_in = sg_dma_address(sg_src);
-
- if (sg_dst) {
- if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) {
- dev_err(dev->dev,
- "[%s:%d] dma_map_sg(dst) error\n",
- __func__, __LINE__);
- dma_unmap_sg(dev->dev, sg_src, 1,
- DMA_TO_DEVICE);
- return -EINVAL;
- }
- dev->addr_out = sg_dma_address(sg_dst);
- }
- } else {
- count = (dev->left_bytes > PAGE_SIZE) ?
- PAGE_SIZE : dev->left_bytes;
-
- if (!sg_pcopy_to_buffer(dev->first, dev->src_nents,
- dev->addr_vir, count,
- dev->total - dev->left_bytes)) {
- dev_err(dev->dev, "[%s:%d] pcopy err\n",
- __func__, __LINE__);
- return -EINVAL;
- }
- dev->left_bytes -= count;
- sg_init_one(&dev->sg_tmp, dev->addr_vir, count);
- if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) {
- dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp) error\n",
- __func__, __LINE__);
- return -ENOMEM;
- }
- dev->addr_in = sg_dma_address(&dev->sg_tmp);
-
- if (sg_dst) {
- if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1,
- DMA_FROM_DEVICE)) {
- dev_err(dev->dev,
- "[%s:%d] dma_map_sg(sg_tmp) error\n",
- __func__, __LINE__);
- dma_unmap_sg(dev->dev, &dev->sg_tmp, 1,
- DMA_TO_DEVICE);
- return -ENOMEM;
+ int i, j, err;
+ unsigned long cr;
+
+ dev->num_clks = devm_clk_bulk_get_all(dev->dev, &dev->clks);
+ if (dev->num_clks < dev->variant->num_clks) {
+ dev_err(dev->dev, "Missing clocks, got %d instead of %d\n",
+ dev->num_clks, dev->variant->num_clks);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < dev->num_clks; i++) {
+ cr = clk_get_rate(dev->clks[i].clk);
+ for (j = 0; j < ARRAY_SIZE(dev->variant->rkclks); j++) {
+ if (dev->variant->rkclks[j].max == 0)
+ continue;
+ if (strcmp(dev->variant->rkclks[j].name, dev->clks[i].id))
+ continue;
+ if (cr > dev->variant->rkclks[j].max) {
+ err = clk_set_rate(dev->clks[i].clk,
+ dev->variant->rkclks[j].max);
+ if (err)
+				dev_err(dev->dev, "Failed to downclock %s from %lu to %lu\n",
+ dev->variant->rkclks[j].name, cr,
+ dev->variant->rkclks[j].max);
+ else
+ dev_info(dev->dev, "Downclocking %s from %lu to %lu\n",
+ dev->variant->rkclks[j].name, cr,
+ dev->variant->rkclks[j].max);
}
- dev->addr_out = sg_dma_address(&dev->sg_tmp);
}
}
- dev->count = count;
return 0;
}
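
Each variant can cap a named clock through its rkclks table: if the rate handed out by the clock framework exceeds the listed maximum, the probe path tries to downclock it, which is how the rk3288 entry above limits sclk to 150 MHz. A hypothetical variant sketching the semantics; the names and rates here are illustrative, not taken from any datasheet:

static const struct rk_variant example_variant = {
	.num_clks = 3,			/* probe fails if fewer clocks are found */
	.rkclks = {
		{ "sclk", 150000000 },	/* cap this clock at 150 MHz */
		/* entries left zeroed (.max == 0) are skipped */
	}
};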
-static void rk_unload_data(struct rk_crypto_info *dev)
+static int rk_crypto_enable_clk(struct rk_crypto_info *dev)
{
- struct scatterlist *sg_in, *sg_out;
+ int err;
- sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp;
- dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE);
+ err = clk_bulk_prepare_enable(dev->num_clks, dev->clks);
+ if (err)
+ dev_err(dev->dev, "Could not enable clock clks\n");
- if (dev->sg_dst) {
- sg_out = dev->aligned ? dev->sg_dst : &dev->sg_tmp;
- dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE);
- }
+ return err;
}
-static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
+static void rk_crypto_disable_clk(struct rk_crypto_info *dev)
{
- struct rk_crypto_info *dev = platform_get_drvdata(dev_id);
- u32 interrupt_status;
+ clk_bulk_disable_unprepare(dev->num_clks, dev->clks);
+}
- spin_lock(&dev->lock);
- interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
- CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);
+/*
+ * Power management strategy: the device is kept suspended until a request
+ * is handled. To avoid a suspend/resume yo-yo, the autosuspend delay is
+ * set to 2 seconds.
+ */
+static int rk_crypto_pm_suspend(struct device *dev)
+{
+ struct rk_crypto_info *rkdev = dev_get_drvdata(dev);
- if (interrupt_status & 0x0a) {
- dev_warn(dev->dev, "DMA Error\n");
- dev->err = -EFAULT;
- }
- tasklet_schedule(&dev->done_task);
+ rk_crypto_disable_clk(rkdev);
+ reset_control_assert(rkdev->rst);
- spin_unlock(&dev->lock);
- return IRQ_HANDLED;
+ return 0;
}
-static int rk_crypto_enqueue(struct rk_crypto_info *dev,
- struct crypto_async_request *async_req)
+static int rk_crypto_pm_resume(struct device *dev)
{
- unsigned long flags;
+ struct rk_crypto_info *rkdev = dev_get_drvdata(dev);
int ret;
- spin_lock_irqsave(&dev->lock, flags);
- ret = crypto_enqueue_request(&dev->queue, async_req);
- if (dev->busy) {
- spin_unlock_irqrestore(&dev->lock, flags);
+ ret = rk_crypto_enable_clk(rkdev);
+ if (ret)
return ret;
- }
- dev->busy = true;
- spin_unlock_irqrestore(&dev->lock, flags);
- tasklet_schedule(&dev->queue_task);
- return ret;
-}
+ reset_control_deassert(rkdev->rst);
+ return 0;
-static void rk_crypto_queue_task_cb(unsigned long data)
-{
- struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
- struct crypto_async_request *async_req, *backlog;
- unsigned long flags;
- int err = 0;
+}
- dev->err = 0;
- spin_lock_irqsave(&dev->lock, flags);
- backlog = crypto_get_backlog(&dev->queue);
- async_req = crypto_dequeue_request(&dev->queue);
+static const struct dev_pm_ops rk_crypto_pm_ops = {
+ SET_RUNTIME_PM_OPS(rk_crypto_pm_suspend, rk_crypto_pm_resume, NULL)
+};
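
With these ops wired up, the device stays suspended until work arrives, and each engine callback later in this patch brackets its hardware access with the usual runtime-PM pair. A minimal sketch of that pattern, assuming rkc points at the selected rk_crypto_info:

	err = pm_runtime_resume_and_get(rkc->dev);	/* resume: clocks on, reset deasserted */
	if (err)
		return err;

	/* ... program the registers and run the DMA ... */

	pm_runtime_put_autosuspend(rkc->dev);		/* arm the 2s autosuspend timer */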
- if (!async_req) {
- dev->busy = false;
- spin_unlock_irqrestore(&dev->lock, flags);
- return;
- }
- spin_unlock_irqrestore(&dev->lock, flags);
+static int rk_crypto_pm_init(struct rk_crypto_info *rkdev)
+{
+ int err;
- if (backlog) {
- backlog->complete(backlog, -EINPROGRESS);
- backlog = NULL;
- }
+ pm_runtime_use_autosuspend(rkdev->dev);
+ pm_runtime_set_autosuspend_delay(rkdev->dev, 2000);
- dev->async_req = async_req;
- err = dev->start(dev);
+ err = pm_runtime_set_suspended(rkdev->dev);
if (err)
- dev->complete(dev->async_req, err);
+ return err;
+ pm_runtime_enable(rkdev->dev);
+ return err;
}
-static void rk_crypto_done_task_cb(unsigned long data)
+static void rk_crypto_pm_exit(struct rk_crypto_info *rkdev)
{
- struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
+ pm_runtime_disable(rkdev->dev);
+}
- if (dev->err) {
- dev->complete(dev->async_req, dev->err);
- return;
+static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
+{
+ struct rk_crypto_info *dev = platform_get_drvdata(dev_id);
+ u32 interrupt_status;
+
+ interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
+ CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);
+
+ dev->status = 1;
+ if (interrupt_status & 0x0a) {
+ dev_warn(dev->dev, "DMA Error\n");
+ dev->status = 0;
}
+ complete(&dev->complete);
- dev->err = dev->update(dev);
- if (dev->err)
- dev->complete(dev->async_req, dev->err);
+ return IRQ_HANDLED;
}
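
The handler no longer drives a tasklet state machine: it acknowledges INTSTS, records in dev->status whether the DMA error bits (mask 0x0a) were raised, and wakes whoever sleeps on the completion. The waiting side, mirrored by rk_hash_run() and rk_cipher_run() below, follows this sketch:

	reinit_completion(&rkc->complete);
	rkc->status = 0;
	/* start the transfer, e.g. via crypto_dma_start() */
	wait_for_completion_interruptible_timeout(&rkc->complete,
						  msecs_to_jiffies(2000));
	if (!rkc->status)	/* timed out, or the IRQ flagged a DMA error */
		err = -EFAULT;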
static struct rk_crypto_tmp *rk_cipher_algs[] = {
@@ -257,6 +184,62 @@ static struct rk_crypto_tmp *rk_cipher_algs[] = {
&rk_ahash_md5,
};
+#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
+static int rk_crypto_debugfs_show(struct seq_file *seq, void *v)
+{
+ struct rk_crypto_info *dd;
+ unsigned int i;
+
+ spin_lock(&rocklist.lock);
+ list_for_each_entry(dd, &rocklist.dev_list, list) {
+ seq_printf(seq, "%s %s requests: %lu\n",
+ dev_driver_string(dd->dev), dev_name(dd->dev),
+ dd->nreq);
+ }
+ spin_unlock(&rocklist.lock);
+
+ for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
+ if (!rk_cipher_algs[i]->dev)
+ continue;
+ switch (rk_cipher_algs[i]->type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
+ rk_cipher_algs[i]->alg.skcipher.base.cra_driver_name,
+ rk_cipher_algs[i]->alg.skcipher.base.cra_name,
+ rk_cipher_algs[i]->stat_req, rk_cipher_algs[i]->stat_fb);
+ seq_printf(seq, "\tfallback due to length: %lu\n",
+ rk_cipher_algs[i]->stat_fb_len);
+ seq_printf(seq, "\tfallback due to alignment: %lu\n",
+ rk_cipher_algs[i]->stat_fb_align);
+ seq_printf(seq, "\tfallback due to SGs: %lu\n",
+ rk_cipher_algs[i]->stat_fb_sgdiff);
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
+ rk_cipher_algs[i]->alg.hash.halg.base.cra_driver_name,
+ rk_cipher_algs[i]->alg.hash.halg.base.cra_name,
+ rk_cipher_algs[i]->stat_req, rk_cipher_algs[i]->stat_fb);
+ break;
+ }
+ }
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(rk_crypto_debugfs);
+#endif
+
+static void register_debugfs(struct rk_crypto_info *crypto_info)
+{
+#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
+	/* Errors from debugfs are deliberately ignored */
+ rocklist.dbgfs_dir = debugfs_create_dir("rk3288_crypto", NULL);
+ rocklist.dbgfs_stats = debugfs_create_file("stats", 0444,
+ rocklist.dbgfs_dir,
+ &rocklist,
+ &rk_crypto_debugfs_fops);
+#endif
+}
+
static int rk_crypto_register(struct rk_crypto_info *crypto_info)
{
unsigned int i, k;
@@ -264,12 +247,22 @@ static int rk_crypto_register(struct rk_crypto_info *crypto_info)
for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
rk_cipher_algs[i]->dev = crypto_info;
- if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
- err = crypto_register_skcipher(
- &rk_cipher_algs[i]->alg.skcipher);
- else
- err = crypto_register_ahash(
- &rk_cipher_algs[i]->alg.hash);
+ switch (rk_cipher_algs[i]->type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ dev_info(crypto_info->dev, "Register %s as %s\n",
+ rk_cipher_algs[i]->alg.skcipher.base.cra_name,
+ rk_cipher_algs[i]->alg.skcipher.base.cra_driver_name);
+ err = crypto_register_skcipher(&rk_cipher_algs[i]->alg.skcipher);
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ dev_info(crypto_info->dev, "Register %s as %s\n",
+ rk_cipher_algs[i]->alg.hash.halg.base.cra_name,
+ rk_cipher_algs[i]->alg.hash.halg.base.cra_driver_name);
+ err = crypto_register_ahash(&rk_cipher_algs[i]->alg.hash);
+ break;
+ default:
+ dev_err(crypto_info->dev, "unknown algorithm\n");
+ }
if (err)
goto err_cipher_algs;
}
@@ -277,7 +270,7 @@ static int rk_crypto_register(struct rk_crypto_info *crypto_info)
err_cipher_algs:
for (k = 0; k < i; k++) {
- if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
+ if (rk_cipher_algs[i]->type == CRYPTO_ALG_TYPE_SKCIPHER)
crypto_unregister_skcipher(&rk_cipher_algs[k]->alg.skcipher);
else
crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
@@ -290,22 +283,23 @@ static void rk_crypto_unregister(void)
unsigned int i;
for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
- if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
+ if (rk_cipher_algs[i]->type == CRYPTO_ALG_TYPE_SKCIPHER)
crypto_unregister_skcipher(&rk_cipher_algs[i]->alg.skcipher);
else
crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
}
}
-static void rk_crypto_action(void *data)
-{
- struct rk_crypto_info *crypto_info = data;
-
- reset_control_assert(crypto_info->rst);
-}
-
static const struct of_device_id crypto_of_id_table[] = {
- { .compatible = "rockchip,rk3288-crypto" },
+ { .compatible = "rockchip,rk3288-crypto",
+ .data = &rk3288_variant,
+ },
+ { .compatible = "rockchip,rk3328-crypto",
+ .data = &rk3328_variant,
+ },
+ { .compatible = "rockchip,rk3399-crypto",
+ .data = &rk3399_variant,
+ },
{}
};
MODULE_DEVICE_TABLE(of, crypto_of_id_table);
@@ -313,7 +307,7 @@ MODULE_DEVICE_TABLE(of, crypto_of_id_table);
static int rk_crypto_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct rk_crypto_info *crypto_info;
+ struct rk_crypto_info *crypto_info, *first;
int err = 0;
crypto_info = devm_kzalloc(&pdev->dev,
@@ -323,7 +317,16 @@ static int rk_crypto_probe(struct platform_device *pdev)
goto err_crypto;
}
- crypto_info->rst = devm_reset_control_get(dev, "crypto-rst");
+ crypto_info->dev = &pdev->dev;
+ platform_set_drvdata(pdev, crypto_info);
+
+ crypto_info->variant = of_device_get_match_data(&pdev->dev);
+ if (!crypto_info->variant) {
+ dev_err(&pdev->dev, "Missing variant\n");
+ return -EINVAL;
+ }
+
+ crypto_info->rst = devm_reset_control_array_get_exclusive(dev);
if (IS_ERR(crypto_info->rst)) {
err = PTR_ERR(crypto_info->rst);
goto err_crypto;
@@ -333,46 +336,18 @@ static int rk_crypto_probe(struct platform_device *pdev)
usleep_range(10, 20);
reset_control_deassert(crypto_info->rst);
- err = devm_add_action_or_reset(dev, rk_crypto_action, crypto_info);
- if (err)
- goto err_crypto;
-
- spin_lock_init(&crypto_info->lock);
-
crypto_info->reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(crypto_info->reg)) {
err = PTR_ERR(crypto_info->reg);
goto err_crypto;
}
- crypto_info->aclk = devm_clk_get(&pdev->dev, "aclk");
- if (IS_ERR(crypto_info->aclk)) {
- err = PTR_ERR(crypto_info->aclk);
- goto err_crypto;
- }
-
- crypto_info->hclk = devm_clk_get(&pdev->dev, "hclk");
- if (IS_ERR(crypto_info->hclk)) {
- err = PTR_ERR(crypto_info->hclk);
- goto err_crypto;
- }
-
- crypto_info->sclk = devm_clk_get(&pdev->dev, "sclk");
- if (IS_ERR(crypto_info->sclk)) {
- err = PTR_ERR(crypto_info->sclk);
- goto err_crypto;
- }
-
- crypto_info->dmaclk = devm_clk_get(&pdev->dev, "apb_pclk");
- if (IS_ERR(crypto_info->dmaclk)) {
- err = PTR_ERR(crypto_info->dmaclk);
+ err = rk_crypto_get_clks(crypto_info);
+ if (err)
goto err_crypto;
- }
crypto_info->irq = platform_get_irq(pdev, 0);
if (crypto_info->irq < 0) {
- dev_warn(crypto_info->dev,
- "control Interrupt is not available.\n");
err = crypto_info->irq;
goto err_crypto;
}
@@ -382,49 +357,64 @@ static int rk_crypto_probe(struct platform_device *pdev)
"rk-crypto", pdev);
if (err) {
- dev_err(crypto_info->dev, "irq request failed.\n");
+ dev_err(&pdev->dev, "irq request failed.\n");
goto err_crypto;
}
- crypto_info->dev = &pdev->dev;
- platform_set_drvdata(pdev, crypto_info);
-
- tasklet_init(&crypto_info->queue_task,
- rk_crypto_queue_task_cb, (unsigned long)crypto_info);
- tasklet_init(&crypto_info->done_task,
- rk_crypto_done_task_cb, (unsigned long)crypto_info);
- crypto_init_queue(&crypto_info->queue, 50);
+ crypto_info->engine = crypto_engine_alloc_init(&pdev->dev, true);
+ crypto_engine_start(crypto_info->engine);
+ init_completion(&crypto_info->complete);
- crypto_info->enable_clk = rk_crypto_enable_clk;
- crypto_info->disable_clk = rk_crypto_disable_clk;
- crypto_info->load_data = rk_load_data;
- crypto_info->unload_data = rk_unload_data;
- crypto_info->enqueue = rk_crypto_enqueue;
- crypto_info->busy = false;
+ err = rk_crypto_pm_init(crypto_info);
+ if (err)
+ goto err_pm;
+
+ spin_lock(&rocklist.lock);
+ first = list_first_entry_or_null(&rocklist.dev_list,
+ struct rk_crypto_info, list);
+ list_add_tail(&crypto_info->list, &rocklist.dev_list);
+ spin_unlock(&rocklist.lock);
+
+ if (!first) {
+ err = rk_crypto_register(crypto_info);
+ if (err) {
+			dev_err(dev, "Failed to register crypto algorithms\n");
+ goto err_register_alg;
+ }
- err = rk_crypto_register(crypto_info);
- if (err) {
- dev_err(dev, "err in register alg");
- goto err_register_alg;
+ register_debugfs(crypto_info);
}
- dev_info(dev, "Crypto Accelerator successfully registered\n");
return 0;
err_register_alg:
- tasklet_kill(&crypto_info->queue_task);
- tasklet_kill(&crypto_info->done_task);
+ rk_crypto_pm_exit(crypto_info);
+err_pm:
+ crypto_engine_exit(crypto_info->engine);
err_crypto:
+ dev_err(dev, "Crypto Accelerator not successfully registered\n");
return err;
}
static int rk_crypto_remove(struct platform_device *pdev)
{
struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);
-
- rk_crypto_unregister();
- tasklet_kill(&crypto_tmp->done_task);
- tasklet_kill(&crypto_tmp->queue_task);
+ struct rk_crypto_info *first;
+
+ spin_lock_bh(&rocklist.lock);
+ list_del(&crypto_tmp->list);
+ first = list_first_entry_or_null(&rocklist.dev_list,
+ struct rk_crypto_info, list);
+ spin_unlock_bh(&rocklist.lock);
+
+ if (!first) {
+#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
+ debugfs_remove_recursive(rocklist.dbgfs_dir);
+#endif
+ rk_crypto_unregister();
+ }
+ rk_crypto_pm_exit(crypto_tmp);
+ crypto_engine_exit(crypto_tmp->engine);
return 0;
}
@@ -433,6 +423,7 @@ static struct platform_driver crypto_driver = {
.remove = rk_crypto_remove,
.driver = {
.name = "rk3288-crypto",
+ .pm = &rk_crypto_pm_ops,
.of_match_table = crypto_of_id_table,
},
};
diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
index 97278c2574ff..b2695258cade 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.h
+++ b/drivers/crypto/rockchip/rk3288_crypto.h
@@ -5,9 +5,13 @@
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/algapi.h>
+#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
+#include <crypto/engine.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
@@ -184,85 +188,91 @@
#define CRYPTO_WRITE(dev, offset, val) \
writel_relaxed((val), ((dev)->reg + (offset)))
+#define RK_MAX_CLKS 4
+
+/*
+ * struct rockchip_ip - management structure for the list of RK crypto instances
+ * @dev_list: list of probed rk_crypto_info devices
+ * @lock: Control access to dev_list
+ * @dbgfs_dir: Debugfs dentry for statistic directory
+ * @dbgfs_stats: Debugfs dentry for statistic counters
+ */
+struct rockchip_ip {
+ struct list_head dev_list;
+ spinlock_t lock; /* Control access to dev_list */
+ struct dentry *dbgfs_dir;
+ struct dentry *dbgfs_stats;
+};
+
+struct rk_clks {
+ const char *name;
+ unsigned long max;
+};
+
+struct rk_variant {
+ int num_clks;
+ struct rk_clks rkclks[RK_MAX_CLKS];
+};
+
struct rk_crypto_info {
+ struct list_head list;
struct device *dev;
- struct clk *aclk;
- struct clk *hclk;
- struct clk *sclk;
- struct clk *dmaclk;
+ struct clk_bulk_data *clks;
+ int num_clks;
struct reset_control *rst;
void __iomem *reg;
int irq;
- struct crypto_queue queue;
- struct tasklet_struct queue_task;
- struct tasklet_struct done_task;
- struct crypto_async_request *async_req;
- int err;
- /* device lock */
- spinlock_t lock;
-
- /* the public variable */
- struct scatterlist *sg_src;
- struct scatterlist *sg_dst;
- struct scatterlist sg_tmp;
- struct scatterlist *first;
- unsigned int left_bytes;
- void *addr_vir;
- int aligned;
- int align_size;
- size_t src_nents;
- size_t dst_nents;
- unsigned int total;
- unsigned int count;
- dma_addr_t addr_in;
- dma_addr_t addr_out;
- bool busy;
- int (*start)(struct rk_crypto_info *dev);
- int (*update)(struct rk_crypto_info *dev);
- void (*complete)(struct crypto_async_request *base, int err);
- int (*enable_clk)(struct rk_crypto_info *dev);
- void (*disable_clk)(struct rk_crypto_info *dev);
- int (*load_data)(struct rk_crypto_info *dev,
- struct scatterlist *sg_src,
- struct scatterlist *sg_dst);
- void (*unload_data)(struct rk_crypto_info *dev);
- int (*enqueue)(struct rk_crypto_info *dev,
- struct crypto_async_request *async_req);
+ const struct rk_variant *variant;
+ unsigned long nreq;
+ struct crypto_engine *engine;
+ struct completion complete;
+ int status;
};
/* the private variable of hash */
struct rk_ahash_ctx {
- struct rk_crypto_info *dev;
+ struct crypto_engine_ctx enginectx;
/* for fallback */
struct crypto_ahash *fallback_tfm;
};
-/* the privete variable of hash for fallback */
+/* the private variable of hash for fallback */
struct rk_ahash_rctx {
+ struct rk_crypto_info *dev;
struct ahash_request fallback_req;
u32 mode;
+ int nrsg;
};
/* the private variable of cipher */
struct rk_cipher_ctx {
- struct rk_crypto_info *dev;
+ struct crypto_engine_ctx enginectx;
unsigned int keylen;
- u32 mode;
+ u8 key[AES_MAX_KEY_SIZE];
u8 iv[AES_BLOCK_SIZE];
+ struct crypto_skcipher *fallback_tfm;
};
-enum alg_type {
- ALG_TYPE_HASH,
- ALG_TYPE_CIPHER,
+struct rk_cipher_rctx {
+ struct rk_crypto_info *dev;
+ u8 backup_iv[AES_BLOCK_SIZE];
+ u32 mode;
+ struct skcipher_request fallback_req; // keep at the end
};
struct rk_crypto_tmp {
- struct rk_crypto_info *dev;
+ u32 type;
+ struct rk_crypto_info *dev;
union {
struct skcipher_alg skcipher;
struct ahash_alg hash;
} alg;
- enum alg_type type;
+ unsigned long stat_req;
+ unsigned long stat_fb;
+ unsigned long stat_fb_len;
+ unsigned long stat_fb_sglen;
+ unsigned long stat_fb_align;
+ unsigned long stat_fb_sgdiff;
};
extern struct rk_crypto_tmp rk_ecb_aes_alg;
@@ -276,4 +286,5 @@ extern struct rk_crypto_tmp rk_ahash_sha1;
extern struct rk_crypto_tmp rk_ahash_sha256;
extern struct rk_crypto_tmp rk_ahash_md5;
+struct rk_crypto_info *get_rk_crypto(void);
#endif
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
index ed03058497bc..a78ff3dcd0b1 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -9,6 +9,8 @@
* Some ideas are from marvell/cesa.c and s5p-sss.c driver.
*/
#include <linux/device.h>
+#include <asm/unaligned.h>
+#include <linux/iopoll.h>
#include "rk3288_crypto.h"
/*
@@ -16,6 +18,44 @@
* so we put the fixed hash out when met zero message.
*/
+static bool rk_ahash_need_fallback(struct ahash_request *req)
+{
+ struct scatterlist *sg;
+
+ sg = req->src;
+ while (sg) {
+ if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
+ return true;
+ }
+ if (sg->length % 4) {
+ return true;
+ }
+ sg = sg_next(sg);
+ }
+ return false;
+}
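
The hash DMA engine requires every source segment to start on a 32-bit boundary and to span a multiple of 4 bytes; anything else is routed to the software fallback. A few concrete cases against the checks above:

/*
 * sg->offset = 0, sg->length = 64 -> hardware path
 * sg->offset = 2, sg->length = 64 -> fallback (offset not 32-bit aligned)
 * sg->offset = 0, sg->length = 66 -> fallback (length % 4 != 0)
 */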
+
+static int rk_ahash_digest_fb(struct ahash_request *areq)
+{
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct rk_ahash_ctx *tfmctx = crypto_ahash_ctx(tfm);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
+
+ algt->stat_fb++;
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ rctx->fallback_req.nbytes = areq->nbytes;
+ rctx->fallback_req.src = areq->src;
+ rctx->fallback_req.result = areq->result;
+
+ return crypto_ahash_digest(&rctx->fallback_req);
+}
+
static int zero_message_process(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -38,15 +78,9 @@ static int zero_message_process(struct ahash_request *req)
return 0;
}
-static void rk_ahash_crypto_complete(struct crypto_async_request *base, int err)
+static void rk_ahash_reg_init(struct ahash_request *req,
+ struct rk_crypto_info *dev)
{
- if (base->complete)
- base->complete(base, err);
-}
-
-static void rk_ahash_reg_init(struct rk_crypto_info *dev)
-{
- struct ahash_request *req = ahash_request_cast(dev->async_req);
struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
int reg_status;
@@ -74,7 +108,7 @@ static void rk_ahash_reg_init(struct rk_crypto_info *dev)
RK_CRYPTO_BYTESWAP_BRFIFO |
RK_CRYPTO_BYTESWAP_BTFIFO);
- CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, dev->total);
+ CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, req->nbytes);
}
static int rk_ahash_init(struct ahash_request *req)
@@ -164,51 +198,80 @@ static int rk_ahash_export(struct ahash_request *req, void *out)
static int rk_ahash_digest(struct ahash_request *req)
{
- struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
- struct rk_crypto_info *dev = tctx->dev;
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+ struct rk_crypto_info *dev;
+ struct crypto_engine *engine;
+
+ if (rk_ahash_need_fallback(req))
+ return rk_ahash_digest_fb(req);
if (!req->nbytes)
return zero_message_process(req);
- else
- return dev->enqueue(dev, &req->base);
+
+ dev = get_rk_crypto();
+
+ rctx->dev = dev;
+ engine = dev->engine;
+
+ return crypto_transfer_hash_request_to_engine(engine, req);
}
-static void crypto_ahash_dma_start(struct rk_crypto_info *dev)
+static void crypto_ahash_dma_start(struct rk_crypto_info *dev, struct scatterlist *sg)
{
- CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, dev->addr_in);
- CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, (dev->count + 3) / 4);
+ CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, sg_dma_address(sg));
+ CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, sg_dma_len(sg) / 4);
CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START |
(RK_CRYPTO_HASH_START << 16));
}
-static int rk_ahash_set_data_start(struct rk_crypto_info *dev)
+static int rk_hash_prepare(struct crypto_engine *engine, void *breq)
{
- int err;
+ struct ahash_request *areq = container_of(breq, struct ahash_request, base);
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
+ struct rk_crypto_info *rkc = rctx->dev;
+ int ret;
- err = dev->load_data(dev, dev->sg_src, NULL);
- if (!err)
- crypto_ahash_dma_start(dev);
- return err;
+ ret = dma_map_sg(rkc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
+ if (ret <= 0)
+ return -EINVAL;
+
+ rctx->nrsg = ret;
+
+ return 0;
}
-static int rk_ahash_start(struct rk_crypto_info *dev)
+static int rk_hash_unprepare(struct crypto_engine *engine, void *breq)
{
- struct ahash_request *req = ahash_request_cast(dev->async_req);
- struct crypto_ahash *tfm;
- struct rk_ahash_rctx *rctx;
-
- dev->total = req->nbytes;
- dev->left_bytes = req->nbytes;
- dev->aligned = 0;
- dev->align_size = 4;
- dev->sg_dst = NULL;
- dev->sg_src = req->src;
- dev->first = req->src;
- dev->src_nents = sg_nents(req->src);
- rctx = ahash_request_ctx(req);
+ struct ahash_request *areq = container_of(breq, struct ahash_request, base);
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
+ struct rk_crypto_info *rkc = rctx->dev;
+
+ dma_unmap_sg(rkc->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE);
+ return 0;
+}
+
+static int rk_hash_run(struct crypto_engine *engine, void *breq)
+{
+ struct ahash_request *areq = container_of(breq, struct ahash_request, base);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
+ struct scatterlist *sg = areq->src;
+ struct rk_crypto_info *rkc = rctx->dev;
+ int err = 0;
+ int i;
+ u32 v;
+
+ err = pm_runtime_resume_and_get(rkc->dev);
+ if (err)
+ return err;
+
rctx->mode = 0;
- tfm = crypto_ahash_reqtfm(req);
+ algt->stat_req++;
+ rkc->nreq++;
+
switch (crypto_ahash_digestsize(tfm)) {
case SHA1_DIGEST_SIZE:
rctx->mode = RK_CRYPTO_HASH_SHA1;
@@ -220,100 +283,88 @@ static int rk_ahash_start(struct rk_crypto_info *dev)
rctx->mode = RK_CRYPTO_HASH_MD5;
break;
default:
- return -EINVAL;
+ err = -EINVAL;
+ goto theend;
}
- rk_ahash_reg_init(dev);
- return rk_ahash_set_data_start(dev);
-}
-
-static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
-{
- int err = 0;
- struct ahash_request *req = ahash_request_cast(dev->async_req);
- struct crypto_ahash *tfm;
-
- dev->unload_data(dev);
- if (dev->left_bytes) {
- if (dev->aligned) {
- if (sg_is_last(dev->sg_src)) {
- dev_warn(dev->dev, "[%s:%d], Lack of data\n",
- __func__, __LINE__);
- err = -ENOMEM;
- goto out_rx;
- }
- dev->sg_src = sg_next(dev->sg_src);
+ rk_ahash_reg_init(areq, rkc);
+
+ while (sg) {
+ reinit_completion(&rkc->complete);
+ rkc->status = 0;
+ crypto_ahash_dma_start(rkc, sg);
+ wait_for_completion_interruptible_timeout(&rkc->complete,
+ msecs_to_jiffies(2000));
+ if (!rkc->status) {
+ dev_err(rkc->dev, "DMA timeout\n");
+ err = -EFAULT;
+ goto theend;
}
- err = rk_ahash_set_data_start(dev);
- } else {
- /*
- * it will take some time to process date after last dma
- * transmission.
- *
- * waiting time is relative with the last date len,
- * so cannot set a fixed time here.
- * 10us makes system not call here frequently wasting
- * efficiency, and make it response quickly when dma
- * complete.
- */
- while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
- udelay(10);
-
- tfm = crypto_ahash_reqtfm(req);
- memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
- crypto_ahash_digestsize(tfm));
- dev->complete(dev->async_req, 0);
- tasklet_schedule(&dev->queue_task);
+ sg = sg_next(sg);
}
-out_rx:
- return err;
+ /*
+	 * The hardware needs some time to process the data after the last
+	 * DMA transfer completes.
+	 *
+	 * That time depends on the length of the last transfer, so a fixed
+	 * delay cannot be used here. Polling every 10us avoids burning CPU
+	 * in a tight loop while still reacting quickly once the hash is
+	 * ready.
+ */
+ readl_poll_timeout(rkc->reg + RK_CRYPTO_HASH_STS, v, v == 0, 10, 1000);
+
+ for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) {
+ v = readl(rkc->reg + RK_CRYPTO_HASH_DOUT_0 + i * 4);
+ put_unaligned_le32(v, areq->result + i * 4);
+ }
+
+theend:
+ pm_runtime_put_autosuspend(rkc->dev);
+
+ local_bh_disable();
+ crypto_finalize_hash_request(engine, breq, err);
+ local_bh_enable();
+
+ return 0;
}
static int rk_cra_hash_init(struct crypto_tfm *tfm)
{
struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
- struct rk_crypto_tmp *algt;
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
-
const char *alg_name = crypto_tfm_alg_name(tfm);
-
- algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
-
- tctx->dev = algt->dev;
- tctx->dev->addr_vir = (void *)__get_free_page(GFP_KERNEL);
- if (!tctx->dev->addr_vir) {
- dev_err(tctx->dev->dev, "failed to kmalloc for addr_vir\n");
- return -ENOMEM;
- }
- tctx->dev->start = rk_ahash_start;
- tctx->dev->update = rk_ahash_crypto_rx;
- tctx->dev->complete = rk_ahash_crypto_complete;
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
/* for fallback */
tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(tctx->fallback_tfm)) {
- dev_err(tctx->dev->dev, "Could not load fallback driver.\n");
+ dev_err(algt->dev->dev, "Could not load fallback driver.\n");
return PTR_ERR(tctx->fallback_tfm);
}
+
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct rk_ahash_rctx) +
crypto_ahash_reqsize(tctx->fallback_tfm));
- return tctx->dev->enable_clk(tctx->dev);
+ tctx->enginectx.op.do_one_request = rk_hash_run;
+ tctx->enginectx.op.prepare_request = rk_hash_prepare;
+ tctx->enginectx.op.unprepare_request = rk_hash_unprepare;
+
+ return 0;
}
static void rk_cra_hash_exit(struct crypto_tfm *tfm)
{
struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
- free_page((unsigned long)tctx->dev->addr_vir);
- return tctx->dev->disable_clk(tctx->dev);
+ crypto_free_ahash(tctx->fallback_tfm);
}
struct rk_crypto_tmp rk_ahash_sha1 = {
- .type = ALG_TYPE_HASH,
+ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.init = rk_ahash_init,
.update = rk_ahash_update,
@@ -337,13 +388,13 @@ struct rk_crypto_tmp rk_ahash_sha1 = {
.cra_init = rk_cra_hash_init,
.cra_exit = rk_cra_hash_exit,
.cra_module = THIS_MODULE,
- }
- }
+ }
+ }
}
};
struct rk_crypto_tmp rk_ahash_sha256 = {
- .type = ALG_TYPE_HASH,
+ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.init = rk_ahash_init,
.update = rk_ahash_update,
@@ -367,13 +418,13 @@ struct rk_crypto_tmp rk_ahash_sha256 = {
.cra_init = rk_cra_hash_init,
.cra_exit = rk_cra_hash_exit,
.cra_module = THIS_MODULE,
- }
- }
+ }
+ }
}
};
struct rk_crypto_tmp rk_ahash_md5 = {
- .type = ALG_TYPE_HASH,
+ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.init = rk_ahash_init,
.update = rk_ahash_update,
@@ -397,7 +448,7 @@ struct rk_crypto_tmp rk_ahash_md5 = {
.cra_init = rk_cra_hash_init,
.cra_exit = rk_cra_hash_exit,
.cra_module = THIS_MODULE,
- }
}
+ }
}
};
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
index 5bbf0d2722e1..59069457582b 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -9,23 +9,94 @@
* Some ideas are from marvell-cesa.c and s5p-sss.c driver.
*/
#include <linux/device.h>
+#include <crypto/scatterwalk.h>
#include "rk3288_crypto.h"
#define RK_CRYPTO_DEC BIT(0)
-static void rk_crypto_complete(struct crypto_async_request *base, int err)
+static int rk_cipher_need_fallback(struct skcipher_request *req)
{
- if (base->complete)
- base->complete(base, err);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+ struct scatterlist *sgs, *sgd;
+ unsigned int stodo, dtodo, len;
+ unsigned int bs = crypto_skcipher_blocksize(tfm);
+
+ if (!req->cryptlen)
+ return true;
+
+ len = req->cryptlen;
+ sgs = req->src;
+ sgd = req->dst;
+ while (sgs && sgd) {
+ if (!IS_ALIGNED(sgs->offset, sizeof(u32))) {
+ algt->stat_fb_align++;
+ return true;
+ }
+ if (!IS_ALIGNED(sgd->offset, sizeof(u32))) {
+ algt->stat_fb_align++;
+ return true;
+ }
+ stodo = min(len, sgs->length);
+ if (stodo % bs) {
+ algt->stat_fb_len++;
+ return true;
+ }
+ dtodo = min(len, sgd->length);
+ if (dtodo % bs) {
+ algt->stat_fb_len++;
+ return true;
+ }
+ if (stodo != dtodo) {
+ algt->stat_fb_sgdiff++;
+ return true;
+ }
+ len -= stodo;
+ sgs = sg_next(sgs);
+ sgd = sg_next(sgd);
+ }
+ return false;
}
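
The cipher constraints are stricter than the hash ones: besides word-aligned offsets, each source/destination segment pair must consist of block-size multiples of equal length, and empty requests always fall back. Summarized against the checks above, with the debugfs counter each case bumps:

/*
 * cryptlen == 0                             -> fallback
 * sgs->offset or sgd->offset not 4-aligned  -> fallback (stat_fb_align)
 * segment length not a blocksize multiple   -> fallback (stat_fb_len)
 * src and dst segment lengths differ        -> fallback (stat_fb_sgdiff)
 * everything aligned and matching           -> hardware path
 */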
-static int rk_handle_req(struct rk_crypto_info *dev,
- struct skcipher_request *req)
+static int rk_cipher_fallback(struct skcipher_request *areq)
{
- if (!IS_ALIGNED(req->cryptlen, dev->align_size))
- return -EINVAL;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct rk_cipher_ctx *op = crypto_skcipher_ctx(tfm);
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+ int err;
+
+ algt->stat_fb++;
+
+ skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
+ skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
+ areq->base.complete, areq->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ if (rctx->mode & RK_CRYPTO_DEC)
+ err = crypto_skcipher_decrypt(&rctx->fallback_req);
else
- return dev->enqueue(dev, &req->base);
+ err = crypto_skcipher_encrypt(&rctx->fallback_req);
+ return err;
+}
+
+static int rk_cipher_handle_req(struct skcipher_request *req)
+{
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ struct rk_crypto_info *rkc;
+ struct crypto_engine *engine;
+
+ if (rk_cipher_need_fallback(req))
+ return rk_cipher_fallback(req);
+
+ rkc = get_rk_crypto();
+
+ engine = rkc->engine;
+ rctx->dev = rkc;
+
+ return crypto_transfer_skcipher_request_to_engine(engine, req);
}
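
For context, a hedged sketch of a kernel consumer driving this path through the generic skcipher API (needs <crypto/skcipher.h> and <linux/scatterlist.h>; buf must be a DMA-able, e.g. kmalloc'd, buffer; the function name is made up). Once the driver has registered "cbc-aes-rk" at priority 300, requests for "cbc(aes)" can land here:

static int example_cbc_aes(u8 *buf, const u8 *key)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	u8 iv[16] = { };
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, 16);	/* AES-128 */
	if (err)
		goto out_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_tfm;
	}

	sg_init_one(&sg, buf, 64);			/* 64 bytes = 4 AES blocks */
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, 64, iv);

	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_tfm:
	crypto_free_skcipher(tfm);
	return err;
}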
static int rk_aes_setkey(struct crypto_skcipher *cipher,
@@ -38,8 +109,9 @@ static int rk_aes_setkey(struct crypto_skcipher *cipher,
keylen != AES_KEYSIZE_256)
return -EINVAL;
ctx->keylen = keylen;
- memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen);
- return 0;
+ memcpy(ctx->key, key, keylen);
+
+ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
}
static int rk_des_setkey(struct crypto_skcipher *cipher,
@@ -53,8 +125,9 @@ static int rk_des_setkey(struct crypto_skcipher *cipher,
return err;
ctx->keylen = keylen;
- memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
- return 0;
+ memcpy(ctx->key, key, keylen);
+
+ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
}
static int rk_tdes_setkey(struct crypto_skcipher *cipher,
@@ -68,161 +141,136 @@ static int rk_tdes_setkey(struct crypto_skcipher *cipher,
return err;
ctx->keylen = keylen;
- memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
- return 0;
+ memcpy(ctx->key, key, keylen);
+
+ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
}
static int rk_aes_ecb_encrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
- ctx->mode = RK_CRYPTO_AES_ECB_MODE;
- return rk_handle_req(dev, req);
+ rctx->mode = RK_CRYPTO_AES_ECB_MODE;
+ return rk_cipher_handle_req(req);
}
static int rk_aes_ecb_decrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
- ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
+ rctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
+ return rk_cipher_handle_req(req);
}
static int rk_aes_cbc_encrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
- ctx->mode = RK_CRYPTO_AES_CBC_MODE;
- return rk_handle_req(dev, req);
+ rctx->mode = RK_CRYPTO_AES_CBC_MODE;
+ return rk_cipher_handle_req(req);
}
static int rk_aes_cbc_decrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
- ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
+ rctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
+ return rk_cipher_handle_req(req);
}
static int rk_des_ecb_encrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
- ctx->mode = 0;
- return rk_handle_req(dev, req);
+ rctx->mode = 0;
+ return rk_cipher_handle_req(req);
}
static int rk_des_ecb_decrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
- ctx->mode = RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
+ rctx->mode = RK_CRYPTO_DEC;
+ return rk_cipher_handle_req(req);
}
static int rk_des_cbc_encrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
- ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
- return rk_handle_req(dev, req);
+ rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
+ return rk_cipher_handle_req(req);
}
static int rk_des_cbc_decrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
- ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
+ rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
+ return rk_cipher_handle_req(req);
}
static int rk_des3_ede_ecb_encrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
- ctx->mode = RK_CRYPTO_TDES_SELECT;
- return rk_handle_req(dev, req);
+ rctx->mode = RK_CRYPTO_TDES_SELECT;
+ return rk_cipher_handle_req(req);
}
static int rk_des3_ede_ecb_decrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
- ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
+ rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
+ return rk_cipher_handle_req(req);
}
static int rk_des3_ede_cbc_encrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
- ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
- return rk_handle_req(dev, req);
+ rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
+ return rk_cipher_handle_req(req);
}
static int rk_des3_ede_cbc_decrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
- ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
+ rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
+ return rk_cipher_handle_req(req);
}
-static void rk_ablk_hw_init(struct rk_crypto_info *dev)
+static void rk_cipher_hw_init(struct rk_crypto_info *dev, struct skcipher_request *req)
{
- struct skcipher_request *req =
- skcipher_request_cast(dev->async_req);
struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
- u32 ivsize, block, conf_reg = 0;
+ u32 block, conf_reg = 0;
block = crypto_tfm_alg_blocksize(tfm);
- ivsize = crypto_skcipher_ivsize(cipher);
if (block == DES_BLOCK_SIZE) {
- ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
+ rctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
RK_CRYPTO_TDES_BYTESWAP_KEY |
RK_CRYPTO_TDES_BYTESWAP_IV;
- CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode);
- memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->iv, ivsize);
+ CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, rctx->mode);
+ memcpy_toio(dev->reg + RK_CRYPTO_TDES_KEY1_0, ctx->key, ctx->keylen);
conf_reg = RK_CRYPTO_DESSEL;
} else {
- ctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
+ rctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
RK_CRYPTO_AES_KEY_CHANGE |
RK_CRYPTO_AES_BYTESWAP_KEY |
RK_CRYPTO_AES_BYTESWAP_IV;
if (ctx->keylen == AES_KEYSIZE_192)
- ctx->mode |= RK_CRYPTO_AES_192BIT_key;
+ rctx->mode |= RK_CRYPTO_AES_192BIT_key;
else if (ctx->keylen == AES_KEYSIZE_256)
- ctx->mode |= RK_CRYPTO_AES_256BIT_key;
- CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode);
- memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->iv, ivsize);
+ rctx->mode |= RK_CRYPTO_AES_256BIT_key;
+ CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, rctx->mode);
+ memcpy_toio(dev->reg + RK_CRYPTO_AES_KEY_0, ctx->key, ctx->keylen);
}
conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
RK_CRYPTO_BYTESWAP_BRFIFO;
@@ -231,189 +279,196 @@ static void rk_ablk_hw_init(struct rk_crypto_info *dev)
RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA);
}
-static void crypto_dma_start(struct rk_crypto_info *dev)
+static void crypto_dma_start(struct rk_crypto_info *dev,
+ struct scatterlist *sgs,
+ struct scatterlist *sgd, unsigned int todo)
{
- CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in);
- CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4);
- CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out);
+ CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, sg_dma_address(sgs));
+ CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, todo);
+ CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, sg_dma_address(sgd));
CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START |
_SBF(RK_CRYPTO_BLOCK_START, 16));
}
-static int rk_set_data_start(struct rk_crypto_info *dev)
+static int rk_cipher_run(struct crypto_engine *engine, void *async_req)
{
- int err;
- struct skcipher_request *req =
- skcipher_request_cast(dev->async_req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- u32 ivsize = crypto_skcipher_ivsize(tfm);
- u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
- dev->sg_src->offset + dev->sg_src->length - ivsize;
-
- /* Store the iv that need to be updated in chain mode.
- * And update the IV buffer to contain the next IV for decryption mode.
- */
- if (ctx->mode & RK_CRYPTO_DEC) {
- memcpy(ctx->iv, src_last_blk, ivsize);
- sg_pcopy_to_buffer(dev->first, dev->src_nents, req->iv,
- ivsize, dev->total - ivsize);
- }
-
- err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
- if (!err)
- crypto_dma_start(dev);
- return err;
-}
-
-static int rk_ablk_start(struct rk_crypto_info *dev)
-{
- struct skcipher_request *req =
- skcipher_request_cast(dev->async_req);
- unsigned long flags;
+ struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq);
+ struct scatterlist *sgs, *sgd;
int err = 0;
+ int ivsize = crypto_skcipher_ivsize(tfm);
+ int offset;
+ u8 iv[AES_BLOCK_SIZE];
+ u8 biv[AES_BLOCK_SIZE];
+ u8 *ivtouse = areq->iv;
+ unsigned int len = areq->cryptlen;
+ unsigned int todo;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+ struct rk_crypto_info *rkc = rctx->dev;
- dev->left_bytes = req->cryptlen;
- dev->total = req->cryptlen;
- dev->sg_src = req->src;
- dev->first = req->src;
- dev->src_nents = sg_nents(req->src);
- dev->sg_dst = req->dst;
- dev->dst_nents = sg_nents(req->dst);
- dev->aligned = 1;
-
- spin_lock_irqsave(&dev->lock, flags);
- rk_ablk_hw_init(dev);
- err = rk_set_data_start(dev);
- spin_unlock_irqrestore(&dev->lock, flags);
- return err;
-}
+ err = pm_runtime_resume_and_get(rkc->dev);
+ if (err)
+ return err;
-static void rk_iv_copyback(struct rk_crypto_info *dev)
-{
- struct skcipher_request *req =
- skcipher_request_cast(dev->async_req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- u32 ivsize = crypto_skcipher_ivsize(tfm);
+ algt->stat_req++;
+ rkc->nreq++;
- /* Update the IV buffer to contain the next IV for encryption mode. */
- if (!(ctx->mode & RK_CRYPTO_DEC)) {
- if (dev->aligned) {
- memcpy(req->iv, sg_virt(dev->sg_dst) +
- dev->sg_dst->length - ivsize, ivsize);
- } else {
- memcpy(req->iv, dev->addr_vir +
- dev->count - ivsize, ivsize);
+ ivsize = crypto_skcipher_ivsize(tfm);
+ if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
+ if (rctx->mode & RK_CRYPTO_DEC) {
+ offset = areq->cryptlen - ivsize;
+ scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
+ offset, ivsize, 0);
}
}
-}
-static void rk_update_iv(struct rk_crypto_info *dev)
-{
- struct skcipher_request *req =
- skcipher_request_cast(dev->async_req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- u32 ivsize = crypto_skcipher_ivsize(tfm);
- u8 *new_iv = NULL;
+ sgs = areq->src;
+ sgd = areq->dst;
- if (ctx->mode & RK_CRYPTO_DEC) {
- new_iv = ctx->iv;
- } else {
- new_iv = page_address(sg_page(dev->sg_dst)) +
- dev->sg_dst->offset + dev->sg_dst->length - ivsize;
+ while (sgs && sgd && len) {
+ if (!sgs->length) {
+ sgs = sg_next(sgs);
+ sgd = sg_next(sgd);
+ continue;
+ }
+ if (rctx->mode & RK_CRYPTO_DEC) {
+			/* back up the last block of the source; it becomes the IV for the next segment */
+ offset = sgs->length - ivsize;
+ scatterwalk_map_and_copy(biv, sgs, offset, ivsize, 0);
+ }
+ if (sgs == sgd) {
+ err = dma_map_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL);
+ if (err <= 0) {
+ err = -EINVAL;
+ goto theend_iv;
+ }
+ } else {
+ err = dma_map_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE);
+ if (err <= 0) {
+ err = -EINVAL;
+ goto theend_iv;
+ }
+ err = dma_map_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE);
+ if (err <= 0) {
+ err = -EINVAL;
+ goto theend_sgs;
+ }
+ }
+ err = 0;
+ rk_cipher_hw_init(rkc, areq);
+ if (ivsize) {
+ if (ivsize == DES_BLOCK_SIZE)
+ memcpy_toio(rkc->reg + RK_CRYPTO_TDES_IV_0, ivtouse, ivsize);
+ else
+ memcpy_toio(rkc->reg + RK_CRYPTO_AES_IV_0, ivtouse, ivsize);
+ }
+ reinit_completion(&rkc->complete);
+ rkc->status = 0;
+
+ todo = min(sg_dma_len(sgs), len);
+ len -= todo;
+ crypto_dma_start(rkc, sgs, sgd, todo / 4);
+ wait_for_completion_interruptible_timeout(&rkc->complete,
+ msecs_to_jiffies(2000));
+ if (!rkc->status) {
+ dev_err(rkc->dev, "DMA timeout\n");
+ err = -EFAULT;
+ goto theend;
+ }
+ if (sgs == sgd) {
+ dma_unmap_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE);
+ dma_unmap_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE);
+ }
+ if (rctx->mode & RK_CRYPTO_DEC) {
+ memcpy(iv, biv, ivsize);
+ ivtouse = iv;
+ } else {
+ offset = sgd->length - ivsize;
+ scatterwalk_map_and_copy(iv, sgd, offset, ivsize, 0);
+ ivtouse = iv;
+ }
+ sgs = sg_next(sgs);
+ sgd = sg_next(sgd);
}
- if (ivsize == DES_BLOCK_SIZE)
- memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize);
- else if (ivsize == AES_BLOCK_SIZE)
- memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize);
-}
-
-/* return:
- * true some err was occurred
- * fault no err, continue
- */
-static int rk_ablk_rx(struct rk_crypto_info *dev)
-{
- int err = 0;
- struct skcipher_request *req =
- skcipher_request_cast(dev->async_req);
-
- dev->unload_data(dev);
- if (!dev->aligned) {
- if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents,
- dev->addr_vir, dev->count,
- dev->total - dev->left_bytes -
- dev->count)) {
- err = -EINVAL;
- goto out_rx;
+ if (areq->iv && ivsize > 0) {
+ offset = areq->cryptlen - ivsize;
+ if (rctx->mode & RK_CRYPTO_DEC) {
+ memcpy(areq->iv, rctx->backup_iv, ivsize);
+ memzero_explicit(rctx->backup_iv, ivsize);
+ } else {
+ scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
+ ivsize, 0);
}
}
- if (dev->left_bytes) {
- rk_update_iv(dev);
- if (dev->aligned) {
- if (sg_is_last(dev->sg_src)) {
- dev_err(dev->dev, "[%s:%d] Lack of data\n",
- __func__, __LINE__);
- err = -ENOMEM;
- goto out_rx;
- }
- dev->sg_src = sg_next(dev->sg_src);
- dev->sg_dst = sg_next(dev->sg_dst);
- }
- err = rk_set_data_start(dev);
+
+theend:
+ pm_runtime_put_autosuspend(rkc->dev);
+
+ local_bh_disable();
+ crypto_finalize_skcipher_request(engine, areq, err);
+ local_bh_enable();
+ return 0;
+
+theend_sgs:
+ if (sgs == sgd) {
+ dma_unmap_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL);
} else {
- rk_iv_copyback(dev);
- /* here show the calculation is over without any err */
- dev->complete(dev->async_req, 0);
- tasklet_schedule(&dev->queue_task);
+ dma_unmap_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE);
+ dma_unmap_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE);
}
-out_rx:
+theend_iv:
return err;
}
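
One subtlety in the loop above is the per-segment CBC IV chaining: the IV for segment N+1 is the last ciphertext block handled in segment N. A comment-level trace for two segments S1 and S2:

/*
 * Encrypt: IV0 = areq->iv
 *   S1 -> D1 using IV0; IV1 = last block of D1, read back from sgd
 *   S2 -> D2 using IV1
 * Decrypt: the next IV is the last *ciphertext* block of S1, so it is
 * copied into biv before the DMA runs; an in-place operation would
 * otherwise overwrite it with plaintext.
 */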
-static int rk_ablk_init_tfm(struct crypto_skcipher *tfm)
+static int rk_cipher_tfm_init(struct crypto_skcipher *tfm)
{
struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ const char *name = crypto_tfm_alg_name(&tfm->base);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- struct rk_crypto_tmp *algt;
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
- algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+ ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fallback_tfm)) {
+ dev_err(algt->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+ name, PTR_ERR(ctx->fallback_tfm));
+ return PTR_ERR(ctx->fallback_tfm);
+ }
+
+ tfm->reqsize = sizeof(struct rk_cipher_rctx) +
+ crypto_skcipher_reqsize(ctx->fallback_tfm);
- ctx->dev = algt->dev;
- ctx->dev->align_size = crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm)) + 1;
- ctx->dev->start = rk_ablk_start;
- ctx->dev->update = rk_ablk_rx;
- ctx->dev->complete = rk_crypto_complete;
- ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL);
+ ctx->enginectx.op.do_one_request = rk_cipher_run;
- return ctx->dev->addr_vir ? ctx->dev->enable_clk(ctx->dev) : -ENOMEM;
+ return 0;
}
-static void rk_ablk_exit_tfm(struct crypto_skcipher *tfm)
+static void rk_cipher_tfm_exit(struct crypto_skcipher *tfm)
{
struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- free_page((unsigned long)ctx->dev->addr_vir);
- ctx->dev->disable_clk(ctx->dev);
+ memzero_explicit(ctx->key, ctx->keylen);
+ crypto_free_skcipher(ctx->fallback_tfm);
}
struct rk_crypto_tmp rk_ecb_aes_alg = {
- .type = ALG_TYPE_CIPHER,
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "ecb(aes)",
.base.cra_driver_name = "ecb-aes-rk",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
.base.cra_alignmask = 0x0f,
.base.cra_module = THIS_MODULE,
- .init = rk_ablk_init_tfm,
- .exit = rk_ablk_exit_tfm,
+ .init = rk_cipher_tfm_init,
+ .exit = rk_cipher_tfm_exit,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = rk_aes_setkey,
@@ -423,19 +478,19 @@ struct rk_crypto_tmp rk_ecb_aes_alg = {
};
struct rk_crypto_tmp rk_cbc_aes_alg = {
- .type = ALG_TYPE_CIPHER,
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "cbc-aes-rk",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
.base.cra_alignmask = 0x0f,
.base.cra_module = THIS_MODULE,
- .init = rk_ablk_init_tfm,
- .exit = rk_ablk_exit_tfm,
+ .init = rk_cipher_tfm_init,
+ .exit = rk_cipher_tfm_exit,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
@@ -446,19 +501,19 @@ struct rk_crypto_tmp rk_cbc_aes_alg = {
};
struct rk_crypto_tmp rk_ecb_des_alg = {
- .type = ALG_TYPE_CIPHER,
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "ecb(des)",
.base.cra_driver_name = "ecb-des-rk",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
.base.cra_alignmask = 0x07,
.base.cra_module = THIS_MODULE,
- .init = rk_ablk_init_tfm,
- .exit = rk_ablk_exit_tfm,
+ .init = rk_cipher_tfm_init,
+ .exit = rk_cipher_tfm_exit,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = rk_des_setkey,
@@ -468,19 +523,19 @@ struct rk_crypto_tmp rk_ecb_des_alg = {
};
struct rk_crypto_tmp rk_cbc_des_alg = {
- .type = ALG_TYPE_CIPHER,
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "cbc(des)",
.base.cra_driver_name = "cbc-des-rk",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
.base.cra_alignmask = 0x07,
.base.cra_module = THIS_MODULE,
- .init = rk_ablk_init_tfm,
- .exit = rk_ablk_exit_tfm,
+ .init = rk_cipher_tfm_init,
+ .exit = rk_cipher_tfm_exit,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -491,19 +546,19 @@ struct rk_crypto_tmp rk_cbc_des_alg = {
};
struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
- .type = ALG_TYPE_CIPHER,
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "ecb-des3-ede-rk",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
.base.cra_alignmask = 0x07,
.base.cra_module = THIS_MODULE,
- .init = rk_ablk_init_tfm,
- .exit = rk_ablk_exit_tfm,
+ .init = rk_cipher_tfm_init,
+ .exit = rk_cipher_tfm_exit,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = rk_tdes_setkey,
@@ -513,19 +568,19 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
};
struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
- .type = ALG_TYPE_CIPHER,
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "cbc-des3-ede-rk",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
.base.cra_alignmask = 0x07,
.base.cra_module = THIS_MODULE,
- .init = rk_ablk_init_tfm,
- .exit = rk_ablk_exit_tfm,
+ .init = rk_cipher_tfm_init,
+ .exit = rk_cipher_tfm_exit,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,