From 4776d381276ddbdec01381c4ae18150263d41ced Mon Sep 17 00:00:00 2001
From: Himangi Saraogi
Date: Tue, 27 May 2014 23:55:48 +0530
Subject: crypto: caam - Introduce the use of the managed version of kzalloc

This patch moves data allocated using kzalloc to managed data allocated
using devm_kzalloc, and removes the now-unnecessary kfree() calls from the
probe and remove functions. Also, linux/device.h is added to make sure
the devm_*() routine declarations are unambiguously available.

Earlier, in the probe function, ctrlpriv was leaked when
ctrl = of_iomap(nprop, 0); failed, as well as when
ctrlpriv->jrpdev = kzalloc(...); failed. Both leaks are fixed by this
patch.

The following Coccinelle semantic patch was used for making the change:

@platform@
identifier p, probefn, removefn;
@@
struct platform_driver p = {
  .probe = probefn,
  .remove = removefn,
};

@prb@
identifier platform.probefn, pdev;
expression e, e1, e2;
@@
probefn(struct platform_device *pdev, ...) {
  <+...
- e = kzalloc(e1, e2)
+ e = devm_kzalloc(&pdev->dev, e1, e2)
  ...
?-kfree(e);
  ...+>
}

@rem depends on prb@
identifier platform.removefn;
expression e;
@@
removefn(...) {
  <...
- kfree(e);
  ...>
}

Signed-off-by: Himangi Saraogi
Acked-by: Julia Lawall
Reviewed-by: Marek Vasut
Signed-off-by: Herbert Xu
---
 drivers/crypto/caam/ctrl.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

(limited to 'drivers/crypto')

diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 1c38f86bf63a..34ffc35e3a09 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -5,6 +5,7 @@
  * Copyright 2008-2012 Freescale Semiconductor, Inc.
  */

+#include <linux/device.h>
 #include
 #include

@@ -295,9 +296,6 @@ static int caam_remove(struct platform_device *pdev)
 	/* Unmap controller region */
 	iounmap(&topregs->ctrl);

-	kfree(ctrlpriv->jrpdev);
-	kfree(ctrlpriv);
-
 	return ret;
 }

@@ -382,7 +380,8 @@ static int caam_probe(struct platform_device *pdev)
 #endif
 	u64 cha_vid;

-	ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
+	ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(struct caam_drv_private),
+				GFP_KERNEL);
 	if (!ctrlpriv)
 		return -ENOMEM;

@@ -432,8 +431,9 @@ static int caam_probe(struct platform_device *pdev)
 		    of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
 			rspec++;

-	ctrlpriv->jrpdev = kzalloc(sizeof(struct platform_device *) * rspec,
-				   GFP_KERNEL);
+	ctrlpriv->jrpdev = devm_kzalloc(&pdev->dev,
+					sizeof(struct platform_device *) * rspec,
+					GFP_KERNEL);
 	if (ctrlpriv->jrpdev == NULL) {
 		iounmap(&topregs->ctrl);
 		return -ENOMEM;
--
cgit v1.2.3

From 13269ec647050fd7652abd0eb05673ffe90c3e92 Mon Sep 17 00:00:00 2001
From: Jean Delvare
Date: Thu, 29 May 2014 09:54:35 +0200
Subject: crypto: drivers - Add 2 missing __exit_p

References to __exit functions must be wrapped with __exit_p.

Signed-off-by: Jean Delvare
Cc: "David S. Miller"
Cc: Robert Jennings
Cc: Marcelo Henrique Cerri
Cc: Fionnuala Gunter
Signed-off-by: Herbert Xu
---
 drivers/crypto/amcc/crypto4xx_core.c | 2 +-
 drivers/crypto/nx/nx-842.c           | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers/crypto')

diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 37f9cc98ba17..e4c6c58fbb03 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1292,7 +1292,7 @@ static struct platform_driver crypto4xx_driver = {
 		.of_match_table = crypto4xx_match,
 	},
 	.probe = crypto4xx_probe,
-	.remove = crypto4xx_remove,
+	.remove = __exit_p(crypto4xx_remove),
 };

 module_platform_driver(crypto4xx_driver);

diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 502edf0a2933..544f6d327ede 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -1247,7 +1247,7 @@ static struct vio_device_id nx842_driver_ids[] = {
 static struct vio_driver nx842_driver = {
 	.name = MODULE_NAME,
 	.probe = nx842_probe,
-	.remove = nx842_remove,
+	.remove = __exit_p(nx842_remove),
 	.get_desired_dma = nx842_get_desired_dma,
 	.id_table = nx842_driver_ids,
 };
--
cgit v1.2.3

From 3d77565ba5e5b9075a4f6d7d6d15996f5e582659 Mon Sep 17 00:00:00 2001
From: Tom Lendacky
Date: Thu, 5 Jun 2014 10:17:45 -0500
Subject: crypto: ccp - Modify PCI support in prep for arm64 support

Modify the PCI device support in prep for supporting the CCP as a
platform device for arm64.

Signed-off-by: Tom Lendacky
Signed-off-by: Herbert Xu
---
 drivers/crypto/ccp/ccp-dev.h |  3 +--
 drivers/crypto/ccp/ccp-pci.c | 39 ++++++++++++++-------------------------
 2 files changed, 15 insertions(+), 27 deletions(-)

(limited to 'drivers/crypto')

diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 7ec536e702ec..72bf1536b653 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -23,8 +23,6 @@

 #include

-#define IO_OFFSET			0x20000
-
 #define MAX_DMAPOOL_NAME_LEN		32

 #define MAX_HW_QUEUES			5
@@ -194,6 +192,7 @@ struct ccp_device {
 	void *dev_specific;
 	int (*get_irq)(struct ccp_device *ccp);
 	void (*free_irq)(struct ccp_device *ccp);
+	unsigned int irq;

 	/*
	 * I/O area used for device communication. The register mapping
The register mapping diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c index 0d746236df5e..180cc87b4dbb 100644 --- a/drivers/crypto/ccp/ccp-pci.c +++ b/drivers/crypto/ccp/ccp-pci.c @@ -12,8 +12,10 @@ #include #include +#include #include #include +#include #include #include #include @@ -24,6 +26,8 @@ #include "ccp-dev.h" #define IO_BAR 2 +#define IO_OFFSET 0x20000 + #define MSIX_VECTORS 2 struct ccp_msix { @@ -89,7 +93,8 @@ static int ccp_get_msi_irq(struct ccp_device *ccp) if (ret) return ret; - ret = request_irq(pdev->irq, ccp_irq_handler, 0, "ccp", dev); + ccp->irq = pdev->irq; + ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev); if (ret) { dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret); goto e_msi; @@ -136,7 +141,7 @@ static void ccp_free_irqs(struct ccp_device *ccp) dev); pci_disable_msix(pdev); } else { - free_irq(pdev->irq, dev); + free_irq(ccp->irq, dev); pci_disable_msi(pdev); } } @@ -147,21 +152,12 @@ static int ccp_find_mmio_area(struct ccp_device *ccp) struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); resource_size_t io_len; unsigned long io_flags; - int bar; io_flags = pci_resource_flags(pdev, IO_BAR); io_len = pci_resource_len(pdev, IO_BAR); if ((io_flags & IORESOURCE_MEM) && (io_len >= (IO_OFFSET + 0x800))) return IO_BAR; - for (bar = 0; bar < PCI_STD_RESOURCE_END; bar++) { - io_flags = pci_resource_flags(pdev, bar); - io_len = pci_resource_len(pdev, bar); - if ((io_flags & IORESOURCE_MEM) && - (io_len >= (IO_OFFSET + 0x800))) - return bar; - } - return -EIO; } @@ -214,20 +210,13 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) } ccp->io_regs = ccp->io_map + IO_OFFSET; - ret = dma_set_mask(dev, DMA_BIT_MASK(48)); - if (ret == 0) { - ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(48)); + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); + if (ret) { + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); if (ret) { - dev_err(dev, - "pci_set_consistent_dma_mask failed (%d)\n", + dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret); - goto e_bar0; - } - } else { - ret = dma_set_mask(dev, DMA_BIT_MASK(32)); - if (ret) { - dev_err(dev, "pci_set_dma_mask failed (%d)\n", ret); - goto e_bar0; + goto e_iomap; } } @@ -235,13 +224,13 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) ret = ccp_init(ccp); if (ret) - goto e_bar0; + goto e_iomap; dev_notice(dev, "enabled\n"); return 0; -e_bar0: +e_iomap: pci_iounmap(pdev, ccp->io_map); e_device: -- cgit v1.2.3 From c4f4b325e9c885b11901174158d5e1ff4b19a189 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Thu, 5 Jun 2014 10:17:57 -0500 Subject: crypto: ccp - Add platform device support for arm64 Add support for the CCP on arm64 as a platform device. 
Signed-off-by: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 2 +- drivers/crypto/ccp/Makefile | 5 + drivers/crypto/ccp/ccp-dev.c | 34 ++++++ drivers/crypto/ccp/ccp-dev.h | 7 +- drivers/crypto/ccp/ccp-platform.c | 224 ++++++++++++++++++++++++++++++++++++++ 5 files changed, 270 insertions(+), 2 deletions(-) create mode 100644 drivers/crypto/ccp/ccp-platform.c (limited to 'drivers/crypto') diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 02f177aeb16c..30756bd1a8c4 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -391,7 +391,7 @@ config CRYPTO_DEV_ATMEL_SHA config CRYPTO_DEV_CCP bool "Support for AMD Cryptographic Coprocessor" - depends on X86 && PCI + depends on (X86 && PCI) || ARM64 default n help The AMD Cryptographic Coprocessor provides hardware support diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index d3505a018720..7f592d8d07bb 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -1,6 +1,11 @@ obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o ccp-objs := ccp-dev.o ccp-ops.o +ifdef CONFIG_X86 ccp-objs += ccp-pci.o +endif +ifdef CONFIG_ARM64 +ccp-objs += ccp-platform.o +endif obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o ccp-crypto-objs := ccp-crypto-main.o \ diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index 2c7816149b01..fa1ab10f960f 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -20,7 +20,9 @@ #include #include #include +#ifdef CONFIG_X86 #include +#endif #include #include "ccp-dev.h" @@ -360,6 +362,12 @@ int ccp_init(struct ccp_device *ccp) /* Build queue interrupt mask (two interrupts per queue) */ qim |= cmd_q->int_ok | cmd_q->int_err; +#ifdef CONFIG_ARM64 + /* For arm64 set the recommended queue cache settings */ + iowrite32(CACHE_WB_NO_ALLOC, ccp->io_regs + CMD_Q_CACHE_BASE + + (CMD_Q_CACHE_INC * i)); +#endif + dev_dbg(dev, "queue #%u available\n", i); } if (ccp->cmd_q_count == 0) { @@ -558,12 +566,15 @@ bool ccp_queues_suspended(struct ccp_device *ccp) } #endif +#ifdef CONFIG_X86 static const struct x86_cpu_id ccp_support[] = { { X86_VENDOR_AMD, 22, }, }; +#endif static int __init ccp_mod_init(void) { +#ifdef CONFIG_X86 struct cpuinfo_x86 *cpuinfo = &boot_cpu_data; int ret; @@ -589,12 +600,30 @@ static int __init ccp_mod_init(void) break; } +#endif + +#ifdef CONFIG_ARM64 + int ret; + + ret = ccp_platform_init(); + if (ret) + return ret; + + /* Don't leave the driver loaded if init failed */ + if (!ccp_get_device()) { + ccp_platform_exit(); + return -ENODEV; + } + + return 0; +#endif return -ENODEV; } static void __exit ccp_mod_exit(void) { +#ifdef CONFIG_X86 struct cpuinfo_x86 *cpuinfo = &boot_cpu_data; switch (cpuinfo->x86) { @@ -602,6 +631,11 @@ static void __exit ccp_mod_exit(void) ccp_pci_exit(); break; } +#endif + +#ifdef CONFIG_ARM64 + ccp_platform_exit(); +#endif } module_init(ccp_mod_init); diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 72bf1536b653..1c5651b09506 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -30,6 +30,8 @@ #define TRNG_RETRIES 10 +#define CACHE_WB_NO_ALLOC 0xb7 + /****** Register Mappings ******/ #define Q_MASK_REG 0x000 @@ -48,7 +50,7 @@ #define CMD_Q_INT_STATUS_BASE 0x214 #define CMD_Q_STATUS_INCR 0x20 -#define CMD_Q_CACHE 0x228 +#define CMD_Q_CACHE_BASE 0x228 #define CMD_Q_CACHE_INC 0x20 #define CMD_Q_ERROR(__qs) ((__qs) & 0x0000003f); @@ -259,6 +261,9 @@ struct ccp_device { int ccp_pci_init(void); void ccp_pci_exit(void); 
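	/*
	 * Illustrative sketch (assumed, not part of the patch): with the
	 * Makefile and ccp-dev.c changes above, module init reduces to a
	 * build-time choice of bus transport, and both paths hand the
	 * discovered device to the common ccp_init() code. The simplified
	 * function name below is hypothetical; the real ccp_mod_init()
	 * also checks the CPU model on x86 and that a device was found.
	 */
	static int __init example_ccp_mod_init(void)
	{
	#ifdef CONFIG_X86
		return ccp_pci_init();		/* PCI device on x86 */
	#endif
	#ifdef CONFIG_ARM64
		return ccp_platform_init();	/* platform device on arm64 */
	#endif
		return -ENODEV;
	}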
+int ccp_platform_init(void); +void ccp_platform_exit(void); + struct ccp_device *ccp_alloc_struct(struct device *dev); int ccp_init(struct ccp_device *ccp); void ccp_destroy(struct ccp_device *ccp); diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c new file mode 100644 index 000000000000..65e58291c668 --- /dev/null +++ b/drivers/crypto/ccp/ccp-platform.c @@ -0,0 +1,224 @@ +/* + * AMD Cryptographic Coprocessor (CCP) driver + * + * Copyright (C) 2014 Advanced Micro Devices, Inc. + * + * Author: Tom Lendacky + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ccp-dev.h" + + +static int ccp_get_irq(struct ccp_device *ccp) +{ + struct device *dev = ccp->dev; + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + int ret; + + ret = platform_get_irq(pdev, 0); + if (ret < 0) + return ret; + + ccp->irq = ret; + ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev); + if (ret) { + dev_notice(dev, "unable to allocate IRQ (%d)\n", ret); + return ret; + } + + return 0; +} + +static int ccp_get_irqs(struct ccp_device *ccp) +{ + struct device *dev = ccp->dev; + int ret; + + ret = ccp_get_irq(ccp); + if (!ret) + return 0; + + /* Couldn't get an interrupt */ + dev_notice(dev, "could not enable interrupts (%d)\n", ret); + + return ret; +} + +static void ccp_free_irqs(struct ccp_device *ccp) +{ + struct device *dev = ccp->dev; + + free_irq(ccp->irq, dev); +} + +static struct resource *ccp_find_mmio_area(struct ccp_device *ccp) +{ + struct device *dev = ccp->dev; + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct resource *ior; + + ior = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (ior && (resource_size(ior) >= 0x800)) + return ior; + + return NULL; +} + +static int ccp_platform_probe(struct platform_device *pdev) +{ + struct ccp_device *ccp; + struct device *dev = &pdev->dev; + struct resource *ior; + int ret; + + ret = -ENOMEM; + ccp = ccp_alloc_struct(dev); + if (!ccp) + goto e_err; + + ccp->dev_specific = NULL; + ccp->get_irq = ccp_get_irqs; + ccp->free_irq = ccp_free_irqs; + + ior = ccp_find_mmio_area(ccp); + ccp->io_map = devm_ioremap_resource(dev, ior); + if (IS_ERR(ccp->io_map)) { + ret = PTR_ERR(ccp->io_map); + goto e_free; + } + ccp->io_regs = ccp->io_map; + + if (!dev->dma_mask) + dev->dma_mask = &dev->coherent_dma_mask; + *(dev->dma_mask) = DMA_BIT_MASK(48); + dev->coherent_dma_mask = DMA_BIT_MASK(48); + + dev_set_drvdata(dev, ccp); + + ret = ccp_init(ccp); + if (ret) + goto e_free; + + dev_notice(dev, "enabled\n"); + + return 0; + +e_free: + kfree(ccp); + +e_err: + dev_notice(dev, "initialization failed\n"); + return ret; +} + +static int ccp_platform_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct ccp_device *ccp = dev_get_drvdata(dev); + + ccp_destroy(ccp); + + kfree(ccp); + + dev_notice(dev, "disabled\n"); + + return 0; +} + +#ifdef CONFIG_PM +static int ccp_platform_suspend(struct platform_device *pdev, + pm_message_t state) +{ + struct device *dev = &pdev->dev; + struct ccp_device *ccp = dev_get_drvdata(dev); + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&ccp->cmd_lock, flags); + + ccp->suspending = 1; + + /* Wake all the queue kthreads to 
prepare for suspend */ + for (i = 0; i < ccp->cmd_q_count; i++) + wake_up_process(ccp->cmd_q[i].kthread); + + spin_unlock_irqrestore(&ccp->cmd_lock, flags); + + /* Wait for all queue kthreads to say they're done */ + while (!ccp_queues_suspended(ccp)) + wait_event_interruptible(ccp->suspend_queue, + ccp_queues_suspended(ccp)); + + return 0; +} + +static int ccp_platform_resume(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct ccp_device *ccp = dev_get_drvdata(dev); + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&ccp->cmd_lock, flags); + + ccp->suspending = 0; + + /* Wake up all the kthreads */ + for (i = 0; i < ccp->cmd_q_count; i++) { + ccp->cmd_q[i].suspended = 0; + wake_up_process(ccp->cmd_q[i].kthread); + } + + spin_unlock_irqrestore(&ccp->cmd_lock, flags); + + return 0; +} +#endif + +static const struct of_device_id ccp_platform_ids[] = { + { .compatible = "amd,ccp-seattle-v1a" }, + { }, +}; + +static struct platform_driver ccp_platform_driver = { + .driver = { + .name = "AMD Cryptographic Coprocessor", + .owner = THIS_MODULE, + .of_match_table = ccp_platform_ids, + }, + .probe = ccp_platform_probe, + .remove = ccp_platform_remove, +#ifdef CONFIG_PM + .suspend = ccp_platform_suspend, + .resume = ccp_platform_resume, +#endif +}; + +int ccp_platform_init(void) +{ + return platform_driver_register(&ccp_platform_driver); +} + +void ccp_platform_exit(void) +{ + platform_driver_unregister(&ccp_platform_driver); +} -- cgit v1.2.3 From d8cba25d2c68992a6e7c1d329b690a9ebe01167d Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Thu, 5 Jun 2014 13:42:39 -0700 Subject: crypto: qat - Intel(R) QAT driver framework This patch adds a common infractructure that will be used by all Intel(R) QuickAssist Technology (QAT) devices. v2 - added ./drivers/crypto/qat/Kconfig and ./drivers/crypto/qat/Makefile v4 - splits common part into more, smaller patches Acked-by: John Griffin Reviewed-by: Bruce W. 
Allan Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu --- drivers/crypto/qat/Kconfig | 22 + drivers/crypto/qat/Makefile | 2 + drivers/crypto/qat/qat_common/Makefile | 14 + drivers/crypto/qat/qat_common/adf_accel_devices.h | 204 +++++++++ drivers/crypto/qat/qat_common/adf_accel_engine.c | 168 ++++++++ drivers/crypto/qat/qat_common/adf_aer.c | 258 ++++++++++++ drivers/crypto/qat/qat_common/adf_cfg.c | 359 ++++++++++++++++ drivers/crypto/qat/qat_common/adf_cfg.h | 87 ++++ drivers/crypto/qat/qat_common/adf_cfg_common.h | 100 +++++ drivers/crypto/qat/qat_common/adf_cfg_strings.h | 83 ++++ drivers/crypto/qat/qat_common/adf_cfg_user.h | 94 +++++ drivers/crypto/qat/qat_common/adf_common_drv.h | 192 +++++++++ drivers/crypto/qat/qat_common/adf_ctl_drv.c | 490 ++++++++++++++++++++++ drivers/crypto/qat/qat_common/adf_dev_mgr.c | 215 ++++++++++ drivers/crypto/qat/qat_common/adf_init.c | 388 +++++++++++++++++ 15 files changed, 2676 insertions(+) create mode 100644 drivers/crypto/qat/Kconfig create mode 100644 drivers/crypto/qat/Makefile create mode 100644 drivers/crypto/qat/qat_common/Makefile create mode 100644 drivers/crypto/qat/qat_common/adf_accel_devices.h create mode 100644 drivers/crypto/qat/qat_common/adf_accel_engine.c create mode 100644 drivers/crypto/qat/qat_common/adf_aer.c create mode 100644 drivers/crypto/qat/qat_common/adf_cfg.c create mode 100644 drivers/crypto/qat/qat_common/adf_cfg.h create mode 100644 drivers/crypto/qat/qat_common/adf_cfg_common.h create mode 100644 drivers/crypto/qat/qat_common/adf_cfg_strings.h create mode 100644 drivers/crypto/qat/qat_common/adf_cfg_user.h create mode 100644 drivers/crypto/qat/qat_common/adf_common_drv.h create mode 100644 drivers/crypto/qat/qat_common/adf_ctl_drv.c create mode 100644 drivers/crypto/qat/qat_common/adf_dev_mgr.c create mode 100644 drivers/crypto/qat/qat_common/adf_init.c (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig new file mode 100644 index 000000000000..3bd3fd968de1 --- /dev/null +++ b/drivers/crypto/qat/Kconfig @@ -0,0 +1,22 @@ +config CRYPTO_DEV_QAT + tristate + select CRYPTO_AEAD + select CRYPTO_AUTHENC + select CRYPTO_ALGAPI + select CRYPTO_AES + select CRYPTO_CBC + select CRYPTO_SHA1 + select CRYPTO_SHA256 + select CRYPTO_SHA512 + +config CRYPTO_DEV_QAT_DH895xCC + tristate "Support for Intel(R) DH895xCC" + depends on X86 && PCI + default n + select CRYPTO_DEV_QAT + help + Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology + for accelerating crypto and compression workloads. + + To compile this as a module, choose M here: the module + will be called qat_dh895xcc. 
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile new file mode 100644 index 000000000000..d11481be225e --- /dev/null +++ b/drivers/crypto/qat/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/ +obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/ diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile new file mode 100644 index 000000000000..e0424dc382fe --- /dev/null +++ b/drivers/crypto/qat/qat_common/Makefile @@ -0,0 +1,14 @@ +obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o +intel_qat-objs := adf_cfg.o \ + adf_ctl_drv.o \ + adf_dev_mgr.o \ + adf_init.o \ + adf_accel_engine.o \ + adf_aer.o \ + adf_transport.o \ + qat_crypto.o \ + qat_algs.o \ + qat_uclo.o \ + qat_hal.o + +intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h new file mode 100644 index 000000000000..f3206d9a41ab --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h @@ -0,0 +1,204 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#ifndef ADF_ACCEL_DEVICES_H_ +#define ADF_ACCEL_DEVICES_H_ +#include +#include +#include +#include +#include "adf_cfg_common.h" + +#define PCI_VENDOR_ID_INTEL 0x8086 +#define ADF_DH895XCC_DEVICE_NAME "dh895xcc" +#define ADF_DH895XCC_PCI_DEVICE_ID 0x435 +#define ADF_DH895XCC_PMISC_BAR 1 +#define ADF_DH895XCC_ETR_BAR 2 +#define ADF_PCI_MAX_BARS 3 +#define ADF_DEVICE_NAME_LENGTH 32 +#define ADF_ETR_MAX_RINGS_PER_BANK 16 +#define ADF_MAX_MSIX_VECTOR_NAME 16 +#define ADF_DEVICE_NAME_PREFIX "qat_" + +enum adf_accel_capabilities { + ADF_ACCEL_CAPABILITIES_NULL = 0, + ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1, + ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2, + ADF_ACCEL_CAPABILITIES_CIPHER = 4, + ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8, + ADF_ACCEL_CAPABILITIES_COMPRESSION = 32, + ADF_ACCEL_CAPABILITIES_LZS_COMPRESSION = 64, + ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128 +}; + +struct adf_bar { + resource_size_t base_addr; + void __iomem *virt_addr; + resource_size_t size; +} __packed; + +struct adf_accel_msix { + struct msix_entry *entries; + char **names; +} __packed; + +struct adf_accel_pci { + struct pci_dev *pci_dev; + struct adf_accel_msix msix_entries; + struct adf_bar pci_bars[ADF_PCI_MAX_BARS]; + uint8_t revid; + uint8_t sku; +} __packed; + +enum dev_state { + DEV_DOWN = 0, + DEV_UP +}; + +enum dev_sku_info { + DEV_SKU_1 = 0, + DEV_SKU_2, + DEV_SKU_3, + DEV_SKU_4, + DEV_SKU_UNKNOWN, +}; + +static inline const char *get_sku_info(enum dev_sku_info info) +{ + switch (info) { + case DEV_SKU_1: + return "SKU1"; + case DEV_SKU_2: + return "SKU2"; + case DEV_SKU_3: + return "SKU3"; + case DEV_SKU_4: + return "SKU4"; + case DEV_SKU_UNKNOWN: + default: + break; + } + return "Unknown SKU"; +} + +struct adf_hw_device_class { + const char *name; + const enum adf_device_type type; + uint32_t instances; +} __packed; + +struct adf_cfg_device_data; +struct adf_accel_dev; +struct adf_etr_data; +struct adf_etr_ring_data; + +struct adf_hw_device_data { + struct adf_hw_device_class *dev_class; + uint32_t (*get_accel_mask)(uint32_t fuse); + uint32_t (*get_ae_mask)(uint32_t fuse); + uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self); + uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self); + uint32_t (*get_num_aes)(struct adf_hw_device_data *self); + uint32_t (*get_num_accels)(struct adf_hw_device_data *self); + enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self); + void (*hw_arb_ring_enable)(struct adf_etr_ring_data *ring); + void (*hw_arb_ring_disable)(struct adf_etr_ring_data *ring); + int (*alloc_irq)(struct adf_accel_dev *accel_dev); + void (*free_irq)(struct adf_accel_dev *accel_dev); + void (*enable_error_correction)(struct adf_accel_dev *accel_dev); + const char *fw_name; + uint32_t pci_dev_id; + uint32_t fuses; + uint32_t accel_capabilities_mask; + uint16_t accel_mask; + uint16_t ae_mask; + uint16_t tx_rings_mask; + uint8_t tx_rx_gap; + uint8_t instance_id; + uint8_t num_banks; + uint8_t num_accel; + uint8_t num_logical_accel; + uint8_t num_engines; +} __packed; + +/* CSR write macro */ +#define ADF_CSR_WR(csr_base, csr_offset, val) \ + __raw_writel(val, csr_base + csr_offset) + +/* CSR read macro */ +#define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset) + +#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev) +#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars) +#define GET_HW_DATA(accel_dev) (accel_dev->hw_device) +#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks) +#define 
GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines) +#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev + +struct adf_admin_comms; +struct icp_qat_fw_loader_handle; +struct adf_fw_loader_data { + struct icp_qat_fw_loader_handle *fw_loader; + const struct firmware *uof_fw; +}; + +struct adf_accel_dev { + struct adf_etr_data *transport; + struct adf_hw_device_data *hw_device; + struct adf_cfg_device_data *cfg; + struct adf_fw_loader_data *fw_loader; + struct adf_admin_comms *admin; + struct list_head crypto_list; + unsigned long status; + atomic_t ref_count; + struct dentry *debugfs_dir; + struct list_head list; + struct module *owner; + uint8_t accel_id; + uint8_t numa_node; + struct adf_accel_pci accel_pci_dev; +} __packed; +#endif diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c new file mode 100644 index 000000000000..25801fe3c375 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_accel_engine.c @@ -0,0 +1,168 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#include +#include +#include "adf_cfg.h" +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "icp_qat_uclo.h" + +int adf_ae_fw_load(struct adf_accel_dev *accel_dev) +{ + struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + void *uof_addr; + uint32_t uof_size; + + if (request_firmware(&loader_data->uof_fw, hw_device->fw_name, + &accel_dev->accel_pci_dev.pci_dev->dev)) { + pr_err("QAT: Failed to load firmware %s\n", hw_device->fw_name); + return -EFAULT; + } + + uof_size = loader_data->uof_fw->size; + uof_addr = (void *)loader_data->uof_fw->data; + if (qat_uclo_map_uof_obj(loader_data->fw_loader, uof_addr, uof_size)) { + pr_err("QAT: Failed to map uof\n"); + goto out_err; + } + if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) { + pr_err("QAT: Failed to map uof\n"); + goto out_err; + } + return 0; + +out_err: + release_firmware(loader_data->uof_fw); + return -EFAULT; +} + +int adf_ae_fw_release(struct adf_accel_dev *accel_dev) +{ + struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; + + release_firmware(loader_data->uof_fw); + qat_uclo_del_uof_obj(loader_data->fw_loader); + qat_hal_deinit(loader_data->fw_loader); + loader_data->fw_loader = NULL; + return 0; +} + +int adf_ae_start(struct adf_accel_dev *accel_dev) +{ + struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev); + + for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) { + if (hw_data->ae_mask & (1 << ae)) { + qat_hal_start(loader_data->fw_loader, ae, 0xFF); + ae_ctr++; + } + } + pr_info("QAT: qat_dev%d started %d acceleration engines\n", + accel_dev->accel_id, ae_ctr); + return 0; +} + +int adf_ae_stop(struct adf_accel_dev *accel_dev) +{ + struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev); + + for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) { + if (hw_data->ae_mask & (1 << ae)) { + qat_hal_stop(loader_data->fw_loader, ae, 0xFF); + ae_ctr++; + } + } + pr_info("QAT: qat_dev%d stopped %d acceleration engines\n", + accel_dev->accel_id, ae_ctr); + return 0; +} + +static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae) +{ + struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; + + qat_hal_reset(loader_data->fw_loader); + if (qat_hal_clr_reset(loader_data->fw_loader)) + return -EFAULT; + + return 0; +} + +int adf_ae_init(struct adf_accel_dev *accel_dev) +{ + struct adf_fw_loader_data *loader_data; + + loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL); + if (!loader_data) + return -ENOMEM; + + accel_dev->fw_loader = loader_data; + if (qat_hal_init(accel_dev)) { + pr_err("QAT: Failed to init the AEs\n"); + kfree(loader_data); + return -EFAULT; + } + if (adf_ae_reset(accel_dev, 0)) { + pr_err("QAT: Failed to reset the AEs\n"); + qat_hal_deinit(loader_data->fw_loader); + kfree(loader_data); + return -EFAULT; + } + return 0; +} + +int adf_ae_shutdown(struct adf_accel_dev *accel_dev) +{ + kfree(accel_dev->fw_loader); + accel_dev->fw_loader = NULL; + return 0; +} diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c new file mode 100644 index 000000000000..0651678e2345 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_aer.c @@ -0,0 +1,258 @@ +/* + This file is provided under a dual BSD/GPLv2 license. 
When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/
+#include
+#include
+#include
+#include
+#include
+#include
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+
+static struct workqueue_struct *device_reset_wq;
+
+static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
+					   pci_channel_state_t state)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	pr_info("QAT: Acceleration driver hardware error detected.\n");
+	if (!accel_dev) {
+		pr_err("QAT: Can't find acceleration device\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	if (state == pci_channel_io_perm_failure) {
+		pr_err("QAT: Can't recover from device error\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/* reset dev data */
+struct adf_reset_dev_data {
+	int mode;
+	struct adf_accel_dev *accel_dev;
+	struct completion compl;
+	struct work_struct reset_work;
+};
+
+#define PPDSTAT_OFFSET 0x7E
+static void adf_dev_restore(struct adf_accel_dev *accel_dev)
+{
+	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+	struct pci_dev *parent = pdev->bus->self;
+	uint16_t ppdstat = 0, bridge_ctl = 0;
+	int pending = 0;
+
+	pr_info("QAT: Resetting device qat_dev%d\n", accel_dev->accel_id);
+	pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat);
+	pending = ppdstat & PCI_EXP_DEVSTA_TRPND;
+	if (pending) {
+		int ctr = 0;
+		do {
+			msleep(100);
+			pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat);
+			pending = ppdstat & PCI_EXP_DEVSTA_TRPND;
+		} while (pending && ctr++ < 10);
+	}
+
+	if (pending)
+		pr_info("QAT: Transaction still in progress. Proceeding\n");
+
+	pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl);
+	bridge_ctl |= PCI_BRIDGE_CTL_BUS_RESET;
+	pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
+	msleep(100);
+	bridge_ctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+	pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
+	msleep(100);
+	pci_restore_state(pdev);
+	pci_save_state(pdev);
+}
+
+static void adf_device_reset_worker(struct work_struct *work)
+{
+	struct adf_reset_dev_data *reset_data =
+		container_of(work, struct adf_reset_dev_data, reset_work);
+	struct adf_accel_dev *accel_dev = reset_data->accel_dev;
+
+	adf_dev_restarting_notify(accel_dev);
+	adf_dev_stop(accel_dev);
+	adf_dev_restore(accel_dev);
+	if (adf_dev_start(accel_dev)) {
+		/* The device hung and we can't restart it so stop here */
+		dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
+		kfree(reset_data);
+		WARN(1, "QAT: device restart failed. Device is unusable\n");
+		return;
+	}
+	adf_dev_restarted_notify(accel_dev);
+	clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+
+	/* The dev is back alive. Notify the caller if in sync mode */
+	if (reset_data->mode == ADF_DEV_RESET_SYNC)
+		complete(&reset_data->compl);
+	else
+		kfree(reset_data);
+}
+
+static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
+				      enum adf_dev_reset_mode mode)
+{
+	struct adf_reset_dev_data *reset_data;
+
+	if (adf_dev_started(accel_dev) &&
+	    !test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
+		return 0;
+
+	set_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+	reset_data = kzalloc(sizeof(*reset_data), GFP_ATOMIC);
+	if (!reset_data)
+		return -ENOMEM;
+	reset_data->accel_dev = accel_dev;
+	init_completion(&reset_data->compl);
+	reset_data->mode = mode;
+	INIT_WORK(&reset_data->reset_work, adf_device_reset_worker);
+	queue_work(device_reset_wq, &reset_data->reset_work);
+
+	/* If in sync mode wait for the result */
+	if (mode == ADF_DEV_RESET_SYNC) {
+		int ret = 0;
+		/* Maximum device reset time is 10 seconds */
+		unsigned long wait_jiffies = msecs_to_jiffies(10000);
+		unsigned long timeout = wait_for_completion_timeout(
+				&reset_data->compl, wait_jiffies);
+		if (!timeout) {
+			pr_err("QAT: Reset device timeout expired\n");
+			ret = -EFAULT;
+		}
+		kfree(reset_data);
+		return ret;
+	}
+	return 0;
+}
+
+static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	if (!accel_dev) {
+		pr_err("QAT: Can't find acceleration device\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+	pci_cleanup_aer_uncorrect_error_status(pdev);
+	if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC))
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void adf_resume(struct pci_dev *pdev)
+{
+	pr_info("QAT: Acceleration driver reset completed\n");
+	pr_info("QAT: Device is up and running\n");
+}
+
+static struct pci_error_handlers adf_err_handler = {
+	.error_detected = adf_error_detected,
+	.slot_reset = adf_slot_reset,
+	.resume = adf_resume,
+};
+
+/**
+ * adf_enable_aer() - Enable Advanced Error Reporting for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ * @adf:        PCI device driver owning the given acceleration device.
+ *
+ * Function enables PCI Advanced Error Reporting for the
+ * QAT acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf)
+{
+	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+	adf->err_handler = &adf_err_handler;
+	pci_enable_pcie_error_reporting(pdev);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_enable_aer);
+
+/**
+ * adf_disable_aer() - Disable Advanced Error Reporting for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function disables PCI Advanced Error Reporting for the
+ * QAT acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_disable_aer(struct adf_accel_dev *accel_dev)
+{
+	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+	pci_disable_pcie_error_reporting(pdev);
+}
+EXPORT_SYMBOL_GPL(adf_disable_aer);
+
+int adf_init_aer(void)
+{
+	device_reset_wq = create_workqueue("qat_device_reset_wq");
+	return (device_reset_wq == NULL) ?
-EFAULT : 0; +} + +void adf_exit_aer(void) +{ + if (device_reset_wq) + destroy_workqueue(device_reset_wq); + device_reset_wq = NULL; +} diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c new file mode 100644 index 000000000000..389c0cf28e70 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_cfg.c @@ -0,0 +1,359 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#include +#include +#include +#include +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "adf_cfg.h" + +static DEFINE_MUTEX(qat_cfg_read_lock); + +static void *qat_dev_cfg_start(struct seq_file *sfile, loff_t *pos) +{ + struct adf_cfg_device_data *dev_cfg = sfile->private; + mutex_lock(&qat_cfg_read_lock); + return seq_list_start(&dev_cfg->sec_list, *pos); +} + +static int qat_dev_cfg_show(struct seq_file *sfile, void *v) +{ + struct list_head *list; + struct adf_cfg_section *sec = + list_entry(v, struct adf_cfg_section, list); + + seq_printf(sfile, "[%s]\n", sec->name); + list_for_each(list, &sec->param_head) { + struct adf_cfg_key_val *ptr = + list_entry(list, struct adf_cfg_key_val, list); + seq_printf(sfile, "%s = %s\n", ptr->key, ptr->val); + } + return 0; +} + +static void *qat_dev_cfg_next(struct seq_file *sfile, void *v, loff_t *pos) +{ + struct adf_cfg_device_data *dev_cfg = sfile->private; + return seq_list_next(v, &dev_cfg->sec_list, pos); +} + +static void qat_dev_cfg_stop(struct seq_file *sfile, void *v) +{ + mutex_unlock(&qat_cfg_read_lock); +} + +static const struct seq_operations qat_dev_cfg_sops = { + .start = qat_dev_cfg_start, + .next = qat_dev_cfg_next, + .stop = qat_dev_cfg_stop, + .show = qat_dev_cfg_show +}; + +static int qat_dev_cfg_open(struct inode *inode, struct file *file) +{ + int ret = seq_open(file, &qat_dev_cfg_sops); + + if (!ret) { + struct seq_file *seq_f = file->private_data; + seq_f->private = inode->i_private; + } + return ret; +} + +static const struct file_operations qat_dev_cfg_fops = { + .open = qat_dev_cfg_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release +}; + +/** + * adf_cfg_dev_add() - Create an acceleration device configuration table. + * @accel_dev: Pointer to acceleration device. + * + * Function creates a configuration table for the given acceleration device. + * The table stores device specific config values. + * To be used by QAT device specific drivers. + * + * Return: 0 on success, error code othewise. + */ +int adf_cfg_dev_add(struct adf_accel_dev *accel_dev) +{ + struct adf_cfg_device_data *dev_cfg_data; + + dev_cfg_data = kzalloc(sizeof(*dev_cfg_data), GFP_KERNEL); + if (!dev_cfg_data) + return -ENOMEM; + INIT_LIST_HEAD(&dev_cfg_data->sec_list); + init_rwsem(&dev_cfg_data->lock); + accel_dev->cfg = dev_cfg_data; + + /* accel_dev->debugfs_dir should always be non-NULL here */ + dev_cfg_data->debug = debugfs_create_file("dev_cfg", S_IRUSR, + accel_dev->debugfs_dir, + dev_cfg_data, + &qat_dev_cfg_fops); + if (!dev_cfg_data->debug) { + pr_err("QAT: Failed to create qat cfg debugfs entry.\n"); + kfree(dev_cfg_data); + accel_dev->cfg = NULL; + return -EFAULT; + } + return 0; +} +EXPORT_SYMBOL_GPL(adf_cfg_dev_add); + +static void adf_cfg_section_del_all(struct list_head *head); + +void adf_cfg_del_all(struct adf_accel_dev *accel_dev) +{ + struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg; + + down_write(&dev_cfg_data->lock); + adf_cfg_section_del_all(&dev_cfg_data->sec_list); + up_write(&dev_cfg_data->lock); +} + +/** + * adf_cfg_dev_remove() - Clears acceleration device configuration table. + * @accel_dev: Pointer to acceleration device. + * + * Function removes configuration table from the given acceleration device + * and frees all allocated memory. + * To be used by QAT device specific drivers. 
+ *
+ * Return: void
+ */
+void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev)
+{
+	struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+
+	down_write(&dev_cfg_data->lock);
+	adf_cfg_section_del_all(&dev_cfg_data->sec_list);
+	up_write(&dev_cfg_data->lock);
+	debugfs_remove(dev_cfg_data->debug);
+	kfree(dev_cfg_data);
+	accel_dev->cfg = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_dev_remove);
+
+static void adf_cfg_keyval_add(struct adf_cfg_key_val *new,
+			       struct adf_cfg_section *sec)
+{
+	list_add_tail(&new->list, &sec->param_head);
+}
+
+static void adf_cfg_keyval_del_all(struct list_head *head)
+{
+	struct list_head *list_ptr, *tmp;
+
+	list_for_each_prev_safe(list_ptr, tmp, head) {
+		struct adf_cfg_key_val *ptr =
+			list_entry(list_ptr, struct adf_cfg_key_val, list);
+		list_del(list_ptr);
+		kfree(ptr);
+	}
+}
+
+static void adf_cfg_section_del_all(struct list_head *head)
+{
+	struct adf_cfg_section *ptr;
+	struct list_head *list, *tmp;
+
+	list_for_each_prev_safe(list, tmp, head) {
+		ptr = list_entry(list, struct adf_cfg_section, list);
+		adf_cfg_keyval_del_all(&ptr->param_head);
+		list_del(list);
+		kfree(ptr);
+	}
+}
+
+static struct adf_cfg_key_val *adf_cfg_key_value_find(struct adf_cfg_section *s,
+						      const char *key)
+{
+	struct list_head *list;
+
+	list_for_each(list, &s->param_head) {
+		struct adf_cfg_key_val *ptr =
+			list_entry(list, struct adf_cfg_key_val, list);
+		if (!strcmp(ptr->key, key))
+			return ptr;
+	}
+	return NULL;
+}
+
+static struct adf_cfg_section *adf_cfg_sec_find(struct adf_accel_dev *accel_dev,
+						const char *sec_name)
+{
+	struct adf_cfg_device_data *cfg = accel_dev->cfg;
+	struct list_head *list;
+
+	list_for_each(list, &cfg->sec_list) {
+		struct adf_cfg_section *ptr =
+			list_entry(list, struct adf_cfg_section, list);
+		if (!strcmp(ptr->name, sec_name))
+			return ptr;
+	}
+	return NULL;
+}
+
+static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev,
+			       const char *sec_name,
+			       const char *key_name,
+			       char *val)
+{
+	struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, sec_name);
+	struct adf_cfg_key_val *keyval = NULL;
+
+	if (sec)
+		keyval = adf_cfg_key_value_find(sec, key_name);
+	if (keyval) {
+		memcpy(val, keyval->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES);
+		return 0;
+	}
+	return -1;
+}
+
+/**
+ * adf_cfg_add_key_value_param() - Add key-value config entry to config table.
+ * @accel_dev:  Pointer to acceleration device.
+ * @section_name: Name of the section where the param will be added
+ * @key: The key string
+ * @val: Value to be assigned to the given @key
+ * @type: Type - string, int or address
+ *
+ * Function adds configuration key - value entry in the appropriate section
+ * in the given acceleration device.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */ +int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev, + const char *section_name, + const char *key, const void *val, + enum adf_cfg_val_type type) +{ + struct adf_cfg_device_data *cfg = accel_dev->cfg; + struct adf_cfg_key_val *key_val; + struct adf_cfg_section *section = adf_cfg_sec_find(accel_dev, + section_name); + if (!section) + return -EFAULT; + + key_val = kzalloc(sizeof(*key_val), GFP_KERNEL); + if (!key_val) + return -ENOMEM; + + INIT_LIST_HEAD(&key_val->list); + strlcpy(key_val->key, key, sizeof(key_val->key)); + + if (type == ADF_DEC) { + snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES, + "%ld", (*((long *)val))); + } else if (type == ADF_STR) { + strlcpy(key_val->val, (char *)val, sizeof(key_val->val)); + } else if (type == ADF_HEX) { + snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES, + "0x%lx", (unsigned long)val); + } else { + pr_err("QAT: Unknown type given.\n"); + kfree(key_val); + return -1; + } + key_val->type = type; + down_write(&cfg->lock); + adf_cfg_keyval_add(key_val, section); + up_write(&cfg->lock); + return 0; +} +EXPORT_SYMBOL_GPL(adf_cfg_add_key_value_param); + +/** + * adf_cfg_section_add() - Add config section entry to config table. + * @accel_dev: Pointer to acceleration device. + * @name: Name of the section + * + * Function adds configuration section where key - value entries + * will be stored. + * To be used by QAT device specific drivers. + * + * Return: 0 on success, error code othewise. + */ +int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name) +{ + struct adf_cfg_device_data *cfg = accel_dev->cfg; + struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, name); + + if (sec) + return 0; + + sec = kzalloc(sizeof(*sec), GFP_KERNEL); + if (!sec) + return -ENOMEM; + + strlcpy(sec->name, name, sizeof(sec->name)); + INIT_LIST_HEAD(&sec->param_head); + down_write(&cfg->lock); + list_add_tail(&sec->list, &cfg->sec_list); + up_write(&cfg->lock); + return 0; +} +EXPORT_SYMBOL_GPL(adf_cfg_section_add); + +int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev, + const char *section, const char *name, + char *value) +{ + struct adf_cfg_device_data *cfg = accel_dev->cfg; + int ret; + + down_read(&cfg->lock); + ret = adf_cfg_key_val_get(accel_dev, section, name, value); + up_read(&cfg->lock); + return ret; +} diff --git a/drivers/crypto/qat/qat_common/adf_cfg.h b/drivers/crypto/qat/qat_common/adf_cfg.h new file mode 100644 index 000000000000..6a9c6f6b5ec9 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_cfg.h @@ -0,0 +1,87 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. 
+ Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef ADF_CFG_H_ +#define ADF_CFG_H_ + +#include +#include +#include +#include "adf_accel_devices.h" +#include "adf_cfg_common.h" +#include "adf_cfg_strings.h" + +struct adf_cfg_key_val { + char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; + char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; + enum adf_cfg_val_type type; + struct list_head list; +}; + +struct adf_cfg_section { + char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES]; + struct list_head list; + struct list_head param_head; +}; + +struct adf_cfg_device_data { + struct list_head sec_list; + struct dentry *debug; + struct rw_semaphore lock; +}; + +int adf_cfg_dev_add(struct adf_accel_dev *accel_dev); +void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev); +int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name); +void adf_cfg_del_all(struct adf_accel_dev *accel_dev); +int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev, + const char *section_name, + const char *key, const void *val, + enum adf_cfg_val_type type); +int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev, + const char *section, const char *name, char *value); + +#endif diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h new file mode 100644 index 000000000000..88b82187ac35 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_cfg_common.h @@ -0,0 +1,100 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. 
+ + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef ADF_CFG_COMMON_H_ +#define ADF_CFG_COMMON_H_ + +#include +#include + +#define ADF_CFG_MAX_STR_LEN 64 +#define ADF_CFG_MAX_KEY_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN +#define ADF_CFG_MAX_VAL_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN +#define ADF_CFG_MAX_SECTION_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN +#define ADF_CFG_BASE_DEC 10 +#define ADF_CFG_BASE_HEX 16 +#define ADF_CFG_ALL_DEVICES 0xFE +#define ADF_CFG_NO_DEVICE 0xFF +#define ADF_CFG_AFFINITY_WHATEVER 0xFF +#define MAX_DEVICE_NAME_SIZE 32 +#define ADF_MAX_DEVICES 32 + +enum adf_cfg_val_type { + ADF_DEC, + ADF_HEX, + ADF_STR +}; + +enum adf_device_type { + DEV_UNKNOWN = 0, + DEV_DH895XCC, +}; + +struct adf_dev_status_info { + enum adf_device_type type; + uint8_t accel_id; + uint8_t instance_id; + uint8_t num_ae; + uint8_t num_accel; + uint8_t num_logical_accel; + uint8_t banks_per_accel; + uint8_t state; + uint8_t bus; + uint8_t dev; + uint8_t fun; + char name[MAX_DEVICE_NAME_SIZE]; +}; + +#define ADF_CTL_IOC_MAGIC 'a' +#define IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS _IOW(ADF_CTL_IOC_MAGIC, 0, \ + struct adf_user_cfg_ctl_data) +#define IOCTL_STOP_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 1, \ + struct adf_user_cfg_ctl_data) +#define IOCTL_START_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 2, \ + struct adf_user_cfg_ctl_data) +#define IOCTL_STATUS_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 3, uint32_t) +#define IOCTL_GET_NUM_DEVICES _IOW(ADF_CTL_IOC_MAGIC, 4, int32_t) +#endif diff --git a/drivers/crypto/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/qat/qat_common/adf_cfg_strings.h new file mode 100644 index 000000000000..c7ac758ebc90 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_cfg_strings.h @@ -0,0 +1,83 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. 
+ This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#ifndef ADF_CFG_STRINGS_H_ +#define ADF_CFG_STRINGS_H_ + +#define ADF_GENERAL_SEC "GENERAL" +#define ADF_KERNEL_SEC "KERNEL" +#define ADF_ACCEL_SEC "Accelerator" +#define ADF_NUM_CY "NumberCyInstances" +#define ADF_NUM_DC "NumberDcInstances" +#define ADF_RING_SYM_SIZE "NumConcurrentSymRequests" +#define ADF_RING_ASYM_SIZE "NumConcurrentAsymRequests" +#define ADF_RING_DC_SIZE "NumConcurrentRequests" +#define ADF_RING_ASYM_TX "RingAsymTx" +#define ADF_RING_SYM_TX "RingSymTx" +#define ADF_RING_RND_TX "RingNrbgTx" +#define ADF_RING_ASYM_RX "RingAsymRx" +#define ADF_RING_SYM_RX "RinSymRx" +#define ADF_RING_RND_RX "RingNrbgRx" +#define ADF_RING_DC_TX "RingTx" +#define ADF_RING_DC_RX "RingRx" +#define ADF_ETRMGR_BANK "Bank" +#define ADF_RING_BANK_NUM "BankNumber" +#define ADF_CY "Cy" +#define ADF_DC "Dc" +#define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled" +#define ADF_ETRMGR_COALESCING_ENABLED_FORMAT \ + ADF_ETRMGR_BANK"%d"ADF_ETRMGR_COALESCING_ENABLED +#define ADF_ETRMGR_COALESCE_TIMER "InterruptCoalescingTimerNs" +#define ADF_ETRMGR_COALESCE_TIMER_FORMAT \ + ADF_ETRMGR_BANK"%d"ADF_ETRMGR_COALESCE_TIMER +#define ADF_ETRMGR_COALESCING_MSG_ENABLED "InterruptCoalescingNumResponses" +#define ADF_ETRMGR_COALESCING_MSG_ENABLED_FORMAT \ + ADF_ETRMGR_BANK"%d"ADF_ETRMGR_COALESCING_MSG_ENABLED +#define ADF_ETRMGR_CORE_AFFINITY "CoreAffinity" +#define ADF_ETRMGR_CORE_AFFINITY_FORMAT \ + ADF_ETRMGR_BANK"%d"ADF_ETRMGR_CORE_AFFINITY +#define ADF_ACCEL_STR "Accelerator%d" +#endif diff --git a/drivers/crypto/qat/qat_common/adf_cfg_user.h b/drivers/crypto/qat/qat_common/adf_cfg_user.h new file mode 100644 index 000000000000..0c38a155a865 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_cfg_user.h @@ -0,0 +1,94 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef ADF_CFG_USER_H_ +#define ADF_CFG_USER_H_ + +#include "adf_cfg_common.h" +#include "adf_cfg_strings.h" + +struct adf_user_cfg_key_val { + char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; + char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; + union { + char *user_val_ptr; + uint64_t padding1; + }; + union { + struct adf_user_cfg_key_val *prev; + uint64_t padding2; + }; + union { + struct adf_user_cfg_key_val *next; + uint64_t padding3; + }; + enum adf_cfg_val_type type; +}; + +struct adf_user_cfg_section { + char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES]; + union { + struct adf_user_cfg_key_val *params; + uint64_t padding1; + }; + union { + struct adf_user_cfg_section *prev; + uint64_t padding2; + }; + union { + struct adf_user_cfg_section *next; + uint64_t padding3; + }; +}; + +struct adf_user_cfg_ctl_data { + union { + struct adf_user_cfg_section *config_section; + uint64_t padding; + }; + uint8_t device_id; +}; +#endif diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h new file mode 100644 index 000000000000..3cea9fa26158 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h @@ -0,0 +1,192 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef ADF_DRV_H +#define ADF_DRV_H + +#include +#include +#include "adf_accel_devices.h" +#include "icp_qat_fw_loader_handle.h" +#include "icp_qat_hal.h" + +#define ADF_STATUS_RESTARTING 0 +#define ADF_STATUS_STARTING 1 +#define ADF_STATUS_CONFIGURED 2 +#define ADF_STATUS_STARTED 3 +#define ADF_STATUS_AE_INITIALISED 4 +#define ADF_STATUS_AE_UCODE_LOADED 5 +#define ADF_STATUS_AE_STARTED 6 +#define ADF_STATUS_ORPHAN_TH_RUNNING 7 +#define ADF_STATUS_IRQ_ALLOCATED 8 + +enum adf_dev_reset_mode { + ADF_DEV_RESET_ASYNC = 0, + ADF_DEV_RESET_SYNC +}; + +enum adf_event { + ADF_EVENT_INIT = 0, + ADF_EVENT_START, + ADF_EVENT_STOP, + ADF_EVENT_SHUTDOWN, + ADF_EVENT_RESTARTING, + ADF_EVENT_RESTARTED, +}; + +struct service_hndl { + int (*event_hld)(struct adf_accel_dev *accel_dev, + enum adf_event event); + unsigned long init_status; + unsigned long start_status; + char *name; + struct list_head list; + int admin; +}; + +int adf_service_register(struct service_hndl *service); +int adf_service_unregister(struct service_hndl *service); + +int adf_dev_init(struct adf_accel_dev *accel_dev); +int adf_dev_start(struct adf_accel_dev *accel_dev); +int adf_dev_stop(struct adf_accel_dev *accel_dev); +int adf_dev_shutdown(struct adf_accel_dev *accel_dev); + +int adf_ctl_dev_register(void); +void adf_ctl_dev_unregister(void); +int adf_processes_dev_register(void); +void adf_processes_dev_unregister(void); + +int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev); +void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev); +struct list_head *adf_devmgr_get_head(void); +struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id); +struct adf_accel_dev *adf_devmgr_get_first(void); +struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev); +int adf_devmgr_verify_id(uint32_t id); +void adf_devmgr_get_num_dev(uint32_t *num); +int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev); +int adf_dev_started(struct adf_accel_dev *accel_dev); +int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev); +int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev); +int adf_ae_init(struct adf_accel_dev *accel_dev); +int adf_ae_shutdown(struct adf_accel_dev *accel_dev); +int adf_ae_fw_load(struct adf_accel_dev *accel_dev); +int adf_ae_fw_release(struct adf_accel_dev *accel_dev); +int adf_ae_start(struct adf_accel_dev *accel_dev); +int adf_ae_stop(struct adf_accel_dev *accel_dev); + +int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf); +void adf_disable_aer(struct adf_accel_dev *accel_dev); +int adf_init_aer(void); +void adf_exit_aer(void); + +int adf_dev_get(struct adf_accel_dev *accel_dev); +void adf_dev_put(struct adf_accel_dev *accel_dev); +int adf_dev_in_use(struct adf_accel_dev *accel_dev); +int adf_init_etr_data(struct adf_accel_dev *accel_dev); +void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev); +int qat_crypto_register(void); +int qat_crypto_unregister(void); +struct qat_crypto_instance *qat_crypto_get_instance_node(int node); +void 
qat_crypto_put_instance(struct qat_crypto_instance *inst); +void qat_alg_callback(void *resp); +int qat_algs_init(void); +void qat_algs_exit(void); +int qat_algs_register(void); +int qat_algs_unregister(void); + +int qat_hal_init(struct adf_accel_dev *accel_dev); +void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle); +void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae, + unsigned int ctx_mask); +void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae, + unsigned int ctx_mask); +void qat_hal_reset(struct icp_qat_fw_loader_handle *handle); +int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle); +void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int ctx_mask); +int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, enum icp_qat_uof_regtype lm_type, + unsigned char mode); +int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char mode); +int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char mode); +void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int ctx_mask, unsigned int upc); +void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int uaddr, + unsigned int words_num, uint64_t *uword); +void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, unsigned char ae, + unsigned int uword_addr, unsigned int words_num, + unsigned int *data); +int qat_hal_get_ins_num(void); +int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, + struct icp_qat_uof_batch_init *lm_init_header); +int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx_mask, + enum icp_qat_uof_regtype reg_type, + unsigned short reg_num, unsigned int regdata); +int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx_mask, + enum icp_qat_uof_regtype reg_type, + unsigned short reg_num, unsigned int regdata); +int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx_mask, + enum icp_qat_uof_regtype reg_type, + unsigned short reg_num, unsigned int regdata); +int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx_mask, + unsigned short reg_num, unsigned int regdata); +int qat_hal_wr_lm(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned short lm_addr, unsigned int value); +int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle); +int qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle); +int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, + void *addr_ptr, int mem_size); +#endif diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c new file mode 100644 index 000000000000..d97069b8a8e4 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c @@ -0,0 +1,490 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. 
+ + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "adf_cfg.h" +#include "adf_cfg_common.h" +#include "adf_cfg_user.h" + +#define DEVICE_NAME "qat_adf_ctl" + +static DEFINE_MUTEX(adf_ctl_lock); +static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg); + +static const struct file_operations adf_ctl_ops = { + .owner = THIS_MODULE, + .unlocked_ioctl = adf_ctl_ioctl, + .compat_ioctl = adf_ctl_ioctl, +}; + +struct adf_ctl_drv_info { + unsigned int major; + struct cdev drv_cdev; + struct class *drv_class; +}; + +static struct adf_ctl_drv_info adt_ctl_drv; + +static void adf_chr_drv_destroy(void) +{ + device_destroy(adt_ctl_drv.drv_class, MKDEV(adt_ctl_drv.major, 0)); + cdev_del(&adt_ctl_drv.drv_cdev); + class_destroy(adt_ctl_drv.drv_class); + unregister_chrdev_region(MKDEV(adt_ctl_drv.major, 0), 1); +} + +static int adf_chr_drv_create(void) +{ + dev_t dev_id; + struct device *drv_device; + + if (alloc_chrdev_region(&dev_id, 0, 1, DEVICE_NAME)) { + pr_err("QAT: unable to allocate chrdev region\n"); + return -EFAULT; + } + + adt_ctl_drv.drv_class = class_create(THIS_MODULE, DEVICE_NAME); + if (IS_ERR(adt_ctl_drv.drv_class)) { + pr_err("QAT: class_create failed for adf_ctl\n"); + goto err_chrdev_unreg; + } + adt_ctl_drv.major = MAJOR(dev_id); + cdev_init(&adt_ctl_drv.drv_cdev, &adf_ctl_ops); + if (cdev_add(&adt_ctl_drv.drv_cdev, dev_id, 1)) { + pr_err("QAT: cdev add failed\n"); + goto err_class_destr; + } + + drv_device = device_create(adt_ctl_drv.drv_class, NULL, + MKDEV(adt_ctl_drv.major, 0), + NULL, DEVICE_NAME); + if (!drv_device) { + pr_err("QAT: failed to create device\n"); + goto err_cdev_del; + } + return 0; +err_cdev_del: + cdev_del(&adt_ctl_drv.drv_cdev); +err_class_destr: + class_destroy(adt_ctl_drv.drv_class); +err_chrdev_unreg: + unregister_chrdev_region(dev_id, 1); + return -EFAULT; +} + +static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data, + unsigned long arg) +{ + struct adf_user_cfg_ctl_data *cfg_data; + + cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL); + if (!cfg_data) + return -ENOMEM; + + /* Initialize device id to NO DEVICE as 0 is a valid device id */ + cfg_data->device_id = ADF_CFG_NO_DEVICE; + + if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) { + pr_err("QAT: failed to copy from user cfg_data.\n"); + kfree(cfg_data); + return -EIO; + } + + *ctl_data = cfg_data; + return 0; +} + +static int adf_add_key_value_data(struct adf_accel_dev *accel_dev, + const char *section, + const struct adf_user_cfg_key_val *key_val) +{ + if (key_val->type == ADF_HEX) { + long *ptr = (long *)key_val->val; + long val = *ptr; + + if (adf_cfg_add_key_value_param(accel_dev, section, + key_val->key, (void *)val, + key_val->type)) { + pr_err("QAT: failed to add keyvalue.\n"); + return -EFAULT; + } + } else { + if (adf_cfg_add_key_value_param(accel_dev, section, + key_val->key, key_val->val, + key_val->type)) { + pr_err("QAT: failed to add keyvalue.\n"); + return -EFAULT; + } + } + return 0; +} + +static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev, + struct adf_user_cfg_ctl_data *ctl_data) +{ + struct adf_user_cfg_key_val key_val; + struct adf_user_cfg_key_val *params_head; + struct adf_user_cfg_section section, *section_head; + + section_head = ctl_data->config_section; + + while (section_head) { + if (copy_from_user(§ion, (void __user *)section_head, + sizeof(*section_head))) { + pr_err("QAT: failed to copy 
section info\n"); + goto out_err; + } + + if (adf_cfg_section_add(accel_dev, section.name)) { + pr_err("QAT: failed to add section.\n"); + goto out_err; + } + + params_head = section_head->params; + + while (params_head) { + if (copy_from_user(&key_val, (void __user *)params_head, + sizeof(key_val))) { + pr_err("QAT: Failed to copy keyvalue.\n"); + goto out_err; + } + if (adf_add_key_value_data(accel_dev, section.name, + &key_val)) { + goto out_err; + } + params_head = key_val.next; + } + section_head = section.next; + } + return 0; +out_err: + adf_cfg_del_all(accel_dev); + return -EFAULT; +} + +static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd, + unsigned long arg) +{ + int ret; + struct adf_user_cfg_ctl_data *ctl_data; + struct adf_accel_dev *accel_dev; + + ret = adf_ctl_alloc_resources(&ctl_data, arg); + if (ret) + return ret; + + accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id); + if (!accel_dev) { + ret = -EFAULT; + goto out; + } + + if (adf_dev_started(accel_dev)) { + ret = -EFAULT; + goto out; + } + + if (adf_copy_key_value_data(accel_dev, ctl_data)) { + ret = -EFAULT; + goto out; + } + set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); +out: + kfree(ctl_data); + return ret; +} + +static int adf_ctl_is_device_in_use(int id) +{ + struct list_head *itr, *head = adf_devmgr_get_head(); + + list_for_each(itr, head) { + struct adf_accel_dev *dev = + list_entry(itr, struct adf_accel_dev, list); + + if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) { + if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) { + pr_info("QAT: device qat_dev%d is busy\n", + dev->accel_id); + return -EBUSY; + } + } + } + return 0; +} + +static int adf_ctl_stop_devices(uint32_t id) +{ + struct list_head *itr, *head = adf_devmgr_get_head(); + int ret = 0; + + list_for_each(itr, head) { + struct adf_accel_dev *accel_dev = + list_entry(itr, struct adf_accel_dev, list); + if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) { + if (!adf_dev_started(accel_dev)) + continue; + + if (adf_dev_stop(accel_dev)) { + pr_err("QAT: Failed to stop qat_dev%d\n", id); + ret = -EFAULT; + } + } + } + return ret; +} + +static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd, + unsigned long arg) +{ + int ret; + struct adf_user_cfg_ctl_data *ctl_data; + + ret = adf_ctl_alloc_resources(&ctl_data, arg); + if (ret) + return ret; + + if (adf_devmgr_verify_id(ctl_data->device_id)) { + pr_err("QAT: Device %d not found\n", ctl_data->device_id); + ret = -ENODEV; + goto out; + } + + ret = adf_ctl_is_device_in_use(ctl_data->device_id); + if (ret) + goto out; + + if (ctl_data->device_id == ADF_CFG_ALL_DEVICES) + pr_info("QAT: Stopping all acceleration devices.\n"); + else + pr_info("QAT: Stopping acceleration device qat_dev%d.\n", + ctl_data->device_id); + + ret = adf_ctl_stop_devices(ctl_data->device_id); + if (ret) + pr_err("QAT: failed to stop device.\n"); +out: + kfree(ctl_data); + return ret; +} + +static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd, + unsigned long arg) +{ + int ret; + struct adf_user_cfg_ctl_data *ctl_data; + struct adf_accel_dev *accel_dev; + + ret = adf_ctl_alloc_resources(&ctl_data, arg); + if (ret) + return ret; + + accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id); + if (!accel_dev) { + pr_err("QAT: Device %d not found\n", ctl_data->device_id); + ret = -ENODEV; + goto out; + } + + if (!adf_dev_started(accel_dev)) { + pr_info("QAT: Starting acceleration device qat_dev%d.\n", + ctl_data->device_id); + ret = adf_dev_start(accel_dev); + } else { 
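+ /*
+ * Not an error: the device is already up, so leave ret as the 0
+ * returned by adf_ctl_alloc_resources() above and report success.
+ */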
+ pr_info("QAT: Acceleration device qat_dev%d already started.\n", + ctl_data->device_id); + } + if (ret) { + pr_err("QAT: Failed to start qat_dev%d\n", ctl_data->device_id); + adf_dev_stop(accel_dev); + } +out: + kfree(ctl_data); + return ret; +} + +static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd, + unsigned long arg) +{ + uint32_t num_devices = 0; + + adf_devmgr_get_num_dev(&num_devices); + if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices))) + return -EFAULT; + + return 0; +} + +static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd, + unsigned long arg) +{ + struct adf_hw_device_data *hw_data; + struct adf_dev_status_info dev_info; + struct adf_accel_dev *accel_dev; + + if (copy_from_user(&dev_info, (void __user *)arg, + sizeof(struct adf_dev_status_info))) { + pr_err("QAT: failed to copy from user.\n"); + return -EFAULT; + } + + accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id); + if (!accel_dev) { + pr_err("QAT: Device %d not found\n", dev_info.accel_id); + return -ENODEV; + } + hw_data = accel_dev->hw_device; + dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN; + dev_info.num_ae = hw_data->get_num_aes(hw_data); + dev_info.num_accel = hw_data->get_num_accels(hw_data); + dev_info.num_logical_accel = hw_data->num_logical_accel; + dev_info.banks_per_accel = hw_data->num_banks + / hw_data->num_logical_accel; + strlcpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name)); + dev_info.instance_id = hw_data->instance_id; + dev_info.type = hw_data->dev_class->type; + dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number; + dev_info.dev = PCI_SLOT(accel_to_pci_dev(accel_dev)->devfn); + dev_info.fun = PCI_FUNC(accel_to_pci_dev(accel_dev)->devfn); + + if (copy_to_user((void __user *)arg, &dev_info, + sizeof(struct adf_dev_status_info))) { + pr_err("QAT: failed to copy status.\n"); + return -EFAULT; + } + return 0; +} + +static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) +{ + int ret; + + if (mutex_lock_interruptible(&adf_ctl_lock)) + return -EFAULT; + + switch (cmd) { + case IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS: + ret = adf_ctl_ioctl_dev_config(fp, cmd, arg); + break; + + case IOCTL_STOP_ACCEL_DEV: + ret = adf_ctl_ioctl_dev_stop(fp, cmd, arg); + break; + + case IOCTL_START_ACCEL_DEV: + ret = adf_ctl_ioctl_dev_start(fp, cmd, arg); + break; + + case IOCTL_GET_NUM_DEVICES: + ret = adf_ctl_ioctl_get_num_devices(fp, cmd, arg); + break; + + case IOCTL_STATUS_ACCEL_DEV: + ret = adf_ctl_ioctl_get_status(fp, cmd, arg); + break; + default: + pr_err("QAT: Invalid ioclt\n"); + ret = -EFAULT; + break; + } + mutex_unlock(&adf_ctl_lock); + return ret; +} + +static int __init adf_register_ctl_device_driver(void) +{ + mutex_init(&adf_ctl_lock); + + if (qat_algs_init()) + goto err_algs_init; + + if (adf_chr_drv_create()) + goto err_chr_dev; + + if (adf_init_aer()) + goto err_aer; + + if (qat_crypto_register()) + goto err_crypto_register; + + return 0; + +err_crypto_register: + adf_exit_aer(); +err_aer: + adf_chr_drv_destroy(); +err_chr_dev: + qat_algs_exit(); +err_algs_init: + mutex_destroy(&adf_ctl_lock); + return -EFAULT; +} + +static void __exit adf_unregister_ctl_device_driver(void) +{ + adf_chr_drv_destroy(); + adf_exit_aer(); + qat_crypto_unregister(); + qat_algs_exit(); + mutex_destroy(&adf_ctl_lock); +} + +module_init(adf_register_ctl_device_driver); +module_exit(adf_unregister_ctl_device_driver); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Intel"); +MODULE_DESCRIPTION("Intel(R) 
QuickAssist Technology");
+MODULE_ALIAS("intel_qat"); diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c new file mode 100644 index 000000000000..ae71555c0868 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c @@ -0,0 +1,215 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include <linux/mutex.h> +#include <linux/list.h> +#include "adf_cfg.h" +#include "adf_common_drv.h" + +static LIST_HEAD(accel_table); +static DEFINE_MUTEX(table_lock); +static uint32_t num_devices; + +/** + * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework + * @accel_dev: Pointer to acceleration device. + * + * Function adds acceleration device to the acceleration framework. + * To be used by QAT device specific drivers. + * + * Return: 0 on success, error code otherwise.
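+ *
+ * Example (an illustrative sketch only, not part of this patch; the
+ * allocation and error unwinding around the call are abbreviated):
+ *
+ * accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, node);
+ * if (!accel_dev)
+ * return -ENOMEM;
+ * if (adf_devmgr_add_dev(accel_dev))
+ * goto out_free_accel_dev;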
+ */ +int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev) +{ + struct list_head *itr; + + if (num_devices == ADF_MAX_DEVICES) { + pr_err("QAT: Only support up to %d devices\n", ADF_MAX_DEVICES); + return -EFAULT; + } + + mutex_lock(&table_lock); + list_for_each(itr, &accel_table) { + struct adf_accel_dev *ptr = + list_entry(itr, struct adf_accel_dev, list); + + if (ptr == accel_dev) { + mutex_unlock(&table_lock); + return -EEXIST; + } + } + atomic_set(&accel_dev->ref_count, 0); + list_add_tail(&accel_dev->list, &accel_table); + accel_dev->accel_id = num_devices++; + mutex_unlock(&table_lock); + return 0; +} +EXPORT_SYMBOL_GPL(adf_devmgr_add_dev); + +struct list_head *adf_devmgr_get_head(void) +{ + return &accel_table; +} + +/** + * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework. + * @accel_dev: Pointer to acceleration device. + * + * Function removes acceleration device from the acceleration framework. + * To be used by QAT device specific drivers. + * + * Return: void + */ +void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev) +{ + mutex_lock(&table_lock); + list_del(&accel_dev->list); + num_devices--; + mutex_unlock(&table_lock); +} +EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev); + +struct adf_accel_dev *adf_devmgr_get_first(void) +{ + struct adf_accel_dev *dev = NULL; + + if (!list_empty(&accel_table)) + dev = list_first_entry(&accel_table, struct adf_accel_dev, + list); + return dev; +} + +/** + * adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev. + * @accel_dev: Pointer to pci device. + * + * Function returns acceleration device associated with the given pci device. + * To be used by QAT device specific drivers. + * + * Return: pinter to accel_dev or NULL if not found. + */ +struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev) +{ + struct list_head *itr; + + list_for_each(itr, &accel_table) { + struct adf_accel_dev *ptr = + list_entry(itr, struct adf_accel_dev, list); + + if (ptr->accel_pci_dev.pci_dev == pci_dev) { + mutex_unlock(&table_lock); + return ptr; + } + } + return NULL; +} +EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev); + +struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id) +{ + struct list_head *itr; + + list_for_each(itr, &accel_table) { + struct adf_accel_dev *ptr = + list_entry(itr, struct adf_accel_dev, list); + + if (ptr->accel_id == id) { + mutex_unlock(&table_lock); + return ptr; + } + } + return NULL; +} + +int adf_devmgr_verify_id(uint32_t id) +{ + if (id == ADF_CFG_ALL_DEVICES) + return 0; + + if (adf_devmgr_get_dev_by_id(id)) + return 0; + + return -ENODEV; +} + +void adf_devmgr_get_num_dev(uint32_t *num) +{ + struct list_head *itr; + + *num = 0; + list_for_each(itr, &accel_table) { + (*num)++; + } +} + +int adf_dev_in_use(struct adf_accel_dev *accel_dev) +{ + return atomic_read(&accel_dev->ref_count) != 0; +} + +int adf_dev_get(struct adf_accel_dev *accel_dev) +{ + if (atomic_add_return(1, &accel_dev->ref_count) == 1) + if (!try_module_get(accel_dev->owner)) + return -EFAULT; + return 0; +} + +void adf_dev_put(struct adf_accel_dev *accel_dev) +{ + if (atomic_sub_return(1, &accel_dev->ref_count) == 0) + module_put(accel_dev->owner); +} + +int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev) +{ + return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status); +} + +int adf_dev_started(struct adf_accel_dev *accel_dev) +{ + return test_bit(ADF_STATUS_STARTED, &accel_dev->status); +} diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c new file 
mode 100644 index 000000000000..5c3d6f12951a --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_init.c @@ -0,0 +1,388 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include <linux/mutex.h> +#include <linux/list.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include "adf_accel_devices.h" +#include "adf_cfg.h" +#include "adf_common_drv.h" + +static LIST_HEAD(service_table); +static DEFINE_MUTEX(service_lock); + +static void adf_service_add(struct service_hndl *service) +{ + mutex_lock(&service_lock); + list_add(&service->list, &service_table); + mutex_unlock(&service_lock); +} + +/** + * adf_service_register() - Register acceleration service in the accel framework + * @service: Pointer to the service + * + * Function adds the acceleration service to the acceleration framework. + * To be used by QAT device specific drivers. + * + * Return: 0 on success, error code otherwise.
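+ *
+ * Example (an illustrative sketch only; my_event_handler and my_service
+ * are hypothetical names, not defined anywhere in this patch):
+ *
+ * static struct service_hndl my_service = {
+ * .event_hld = my_event_handler,
+ * .name = "my_service",
+ * };
+ *
+ * if (adf_service_register(&my_service))
+ * return -EFAULT;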
+ */
+int adf_service_register(struct service_hndl *service)
+{
+ service->init_status = 0;
+ service->start_status = 0;
+ adf_service_add(service);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_service_register);
+
+static void adf_service_remove(struct service_hndl *service)
+{
+ mutex_lock(&service_lock);
+ list_del(&service->list);
+ mutex_unlock(&service_lock);
+}
+
+/**
+ * adf_service_unregister() - Unregister acceleration service from the framework
+ * @service: Pointer to the service
+ *
+ * Function removes the acceleration service from the acceleration framework.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_service_unregister(struct service_hndl *service)
+{
+ if (service->init_status || service->start_status) {
+ pr_err("QAT: Could not remove active service\n");
+ return -EFAULT;
+ }
+ adf_service_remove(service);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_service_unregister);
+
+/**
+ * adf_dev_start() - Start acceleration service for the given accel device
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function notifies all the registered services that the acceleration device
+ * is ready to be used.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_dev_start(struct adf_accel_dev *accel_dev)
+{
+ struct service_hndl *service;
+ struct list_head *list_itr;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+
+ if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status)) {
+ pr_info("QAT: Device not configured\n");
+ return -EFAULT;
+ }
+ set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+
+ if (adf_ae_init(accel_dev)) {
+ pr_err("QAT: Failed to initialise Acceleration Engine\n");
+ return -EFAULT;
+ }
+ set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);
+
+ if (adf_ae_fw_load(accel_dev)) {
+ pr_err("Failed to load acceleration FW\n");
+ adf_ae_fw_release(accel_dev);
+ return -EFAULT;
+ }
+ set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
+
+ if (hw_data->alloc_irq(accel_dev)) {
+ pr_err("QAT: Failed to allocate interrupts\n");
+ return -EFAULT;
+ }
+ set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
+
+ /*
+ * Subservice initialisation is divided into two stages: init and start.
+ * This is to facilitate any ordering dependencies between services
+ * prior to starting any of the accelerators.
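+ * Concretely, the loops below deliver ADF_EVENT_INIT to every registered
+ * service (admin services first), and only then does the ADF_EVENT_START
+ * pass run, again with admin services ahead of the others.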
+ */
+ list_for_each(list_itr, &service_table) {
+ service = list_entry(list_itr, struct service_hndl, list);
+ if (!service->admin)
+ continue;
+ if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
+ pr_err("QAT: Failed to initialise service %s\n",
+ service->name);
+ return -EFAULT;
+ }
+ set_bit(accel_dev->accel_id, &service->init_status);
+ }
+ list_for_each(list_itr, &service_table) {
+ service = list_entry(list_itr, struct service_hndl, list);
+ if (service->admin)
+ continue;
+ if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
+ pr_err("QAT: Failed to initialise service %s\n",
+ service->name);
+ return -EFAULT;
+ }
+ set_bit(accel_dev->accel_id, &service->init_status);
+ }
+
+ hw_data->enable_error_correction(accel_dev);
+
+ if (adf_ae_start(accel_dev)) {
+ pr_err("QAT: AE Start Failed\n");
+ return -EFAULT;
+ }
+ set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
+
+ list_for_each(list_itr, &service_table) {
+ service = list_entry(list_itr, struct service_hndl, list);
+ if (!service->admin)
+ continue;
+ if (service->event_hld(accel_dev, ADF_EVENT_START)) {
+ pr_err("QAT: Failed to start service %s\n",
+ service->name);
+ return -EFAULT;
+ }
+ set_bit(accel_dev->accel_id, &service->start_status);
+ }
+ list_for_each(list_itr, &service_table) {
+ service = list_entry(list_itr, struct service_hndl, list);
+ if (service->admin)
+ continue;
+ if (service->event_hld(accel_dev, ADF_EVENT_START)) {
+ pr_err("QAT: Failed to start service %s\n",
+ service->name);
+ return -EFAULT;
+ }
+ set_bit(accel_dev->accel_id, &service->start_status);
+ }
+
+ clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
+ set_bit(ADF_STATUS_STARTED, &accel_dev->status);
+
+ if (qat_algs_register()) {
+ pr_err("QAT: Failed to register crypto algs\n");
+ set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+ clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+ return -EFAULT;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_start);
+
+/**
+ * adf_dev_stop() - Stop acceleration service for the given accel device
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function notifies all the registered services that the acceleration device
+ * is shutting down.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
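+ *
+ * Example (an illustrative sketch only): a device specific remove path
+ * would typically stop the device and then shut it down:
+ *
+ * adf_dev_stop(accel_dev);
+ * adf_dev_shutdown(accel_dev);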
+ */ +int adf_dev_stop(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct service_hndl *service; + struct list_head *list_itr; + int ret, wait = 0; + + if (!adf_dev_started(accel_dev) && + !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) { + return 0; + } + clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); + clear_bit(ADF_STATUS_STARTING, &accel_dev->status); + clear_bit(ADF_STATUS_STARTED, &accel_dev->status); + + if (qat_algs_unregister()) + pr_err("QAT: Failed to unregister crypto algs\n"); + + list_for_each(list_itr, &service_table) { + service = list_entry(list_itr, struct service_hndl, list); + if (service->admin) + continue; + if (!test_bit(accel_dev->accel_id, &service->start_status)) + continue; + ret = service->event_hld(accel_dev, ADF_EVENT_STOP); + if (!ret) { + clear_bit(accel_dev->accel_id, &service->start_status); + } else if (ret == -EAGAIN) { + wait = 1; + clear_bit(accel_dev->accel_id, &service->start_status); + } + } + list_for_each(list_itr, &service_table) { + service = list_entry(list_itr, struct service_hndl, list); + if (!service->admin) + continue; + if (!test_bit(accel_dev->accel_id, &service->start_status)) + continue; + if (service->event_hld(accel_dev, ADF_EVENT_STOP)) + pr_err("QAT: Failed to shutdown service %s\n", + service->name); + else + clear_bit(accel_dev->accel_id, &service->start_status); + } + + if (wait) + msleep(100); + + if (adf_dev_started(accel_dev)) { + if (adf_ae_stop(accel_dev)) + pr_err("QAT: failed to stop AE\n"); + else + clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status); + } + + if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) { + if (adf_ae_fw_release(accel_dev)) + pr_err("QAT: Failed to release the ucode\n"); + else + clear_bit(ADF_STATUS_AE_UCODE_LOADED, + &accel_dev->status); + } + + if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) { + if (adf_ae_shutdown(accel_dev)) + pr_err("QAT: Failed to shutdown Accel Engine\n"); + else + clear_bit(ADF_STATUS_AE_INITIALISED, + &accel_dev->status); + } + + list_for_each(list_itr, &service_table) { + service = list_entry(list_itr, struct service_hndl, list); + if (service->admin) + continue; + if (!test_bit(accel_dev->accel_id, &service->init_status)) + continue; + if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN)) + pr_err("QAT: Failed to shutdown service %s\n", + service->name); + else + clear_bit(accel_dev->accel_id, &service->init_status); + } + list_for_each(list_itr, &service_table) { + service = list_entry(list_itr, struct service_hndl, list); + if (!service->admin) + continue; + if (!test_bit(accel_dev->accel_id, &service->init_status)) + continue; + if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN)) + pr_err("QAT: Failed to shutdown service %s\n", + service->name); + else + clear_bit(accel_dev->accel_id, &service->init_status); + } + + if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) { + hw_data->free_irq(accel_dev); + clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status); + } + + /* Delete configuration only if not restarting */ + if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) + adf_cfg_del_all(accel_dev); + + return 0; +} +EXPORT_SYMBOL_GPL(adf_dev_stop); + +int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev) +{ + struct service_hndl *service; + struct list_head *list_itr; + + list_for_each(list_itr, &service_table) { + service = list_entry(list_itr, struct service_hndl, list); + if (service->admin) + continue; + if (service->event_hld(accel_dev, 
ADF_EVENT_RESTARTING)) + pr_err("QAT: Failed to restart service %s.\n", + service->name); + } + list_for_each(list_itr, &service_table) { + service = list_entry(list_itr, struct service_hndl, list); + if (!service->admin) + continue; + if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING)) + pr_err("QAT: Failed to restart service %s.\n", + service->name); + } + return 0; +} + +int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev) +{ + struct service_hndl *service; + struct list_head *list_itr; + + list_for_each(list_itr, &service_table) { + service = list_entry(list_itr, struct service_hndl, list); + if (service->admin) + continue; + if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED)) + pr_err("QAT: Failed to restart service %s.\n", + service->name); + } + list_for_each(list_itr, &service_table) { + service = list_entry(list_itr, struct service_hndl, list); + if (!service->admin) + continue; + if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED)) + pr_err("QAT: Failed to restart service %s.\n", + service->name); + } + return 0; +} -- cgit v1.2.3 From a672a9dc872ec4eea91bd331f6d55aee4ac48d4f Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Thu, 5 Jun 2014 13:42:58 -0700 Subject: crypto: qat - Intel(R) QAT transport code This patch adds a code that implements communication channel between the driver and the firmware. Acked-by: John Griffin Reviewed-by: Bruce W. Allan Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/adf_transport.c | 565 +++++++++++++++++++++ drivers/crypto/qat/qat_common/adf_transport.h | 63 +++ .../qat/qat_common/adf_transport_access_macros.h | 160 ++++++ .../crypto/qat/qat_common/adf_transport_debug.c | 301 +++++++++++ .../crypto/qat/qat_common/adf_transport_internal.h | 115 +++++ 5 files changed, 1204 insertions(+) create mode 100644 drivers/crypto/qat/qat_common/adf_transport.c create mode 100644 drivers/crypto/qat/qat_common/adf_transport.h create mode 100644 drivers/crypto/qat/qat_common/adf_transport_access_macros.h create mode 100644 drivers/crypto/qat/qat_common/adf_transport_debug.c create mode 100644 drivers/crypto/qat/qat_common/adf_transport_internal.h (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c new file mode 100644 index 000000000000..b21301056e3f --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_transport.c @@ -0,0 +1,565 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include +#include "adf_accel_devices.h" +#include "adf_transport_internal.h" +#include "adf_transport_access_macros.h" +#include "adf_cfg.h" +#include "adf_common_drv.h" + +static inline uint32_t adf_modulo(uint32_t data, uint32_t shift) +{ + uint32_t div = data >> shift; + uint32_t mult = div << shift; + return data - mult; +} + +static inline int adf_check_ring_alignment(uint64_t addr, uint64_t size) +{ + if (((size - 1) & addr) != 0) + return -EFAULT; + return 0; +} + +static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num) +{ + int i = ADF_MIN_RING_SIZE; + for (; i <= ADF_MAX_RING_SIZE; i++) + if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) + return i; + + return ADF_DEFAULT_RING_SIZE; +} + +static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring) +{ + spin_lock(&bank->lock); + if (bank->ring_mask & (1 << ring)) { + spin_unlock(&bank->lock); + return -EFAULT; + } + bank->ring_mask |= (1 << ring); + spin_unlock(&bank->lock); + return 0; +} + +static void adf_unreserve_ring(struct adf_etr_bank_data *bank, uint32_t ring) +{ + spin_lock(&bank->lock); + bank->ring_mask &= ~(1 << ring); + spin_unlock(&bank->lock); +} + +static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring) +{ + spin_lock_bh(&bank->lock); + bank->irq_mask |= (1 << ring); + spin_unlock_bh(&bank->lock); + WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask); + WRITE_CSR_INT_COL_CTL(bank->csr_addr, bank->bank_number, + bank->irq_coalesc_timer); +} + +static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring) +{ + spin_lock_bh(&bank->lock); + bank->irq_mask &= ~(1 << ring); + spin_unlock_bh(&bank->lock); + WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask); +} + +int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg) +{ + if (atomic_add_return(1, ring->inflights) > + ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) { + atomic_dec(ring->inflights); + return -EAGAIN; + } + spin_lock_bh(&ring->lock); + memcpy(ring->base_addr + ring->tail, msg, + ADF_MSG_SIZE_TO_BYTES(ring->msg_size)); + + ring->tail = adf_modulo(ring->tail + + ADF_MSG_SIZE_TO_BYTES(ring->msg_size), + ADF_RING_SIZE_MODULO(ring->ring_size)); + WRITE_CSR_RING_TAIL(ring->bank->csr_addr, ring->bank->bank_number, + ring->ring_number, ring->tail); + 
spin_unlock_bh(&ring->lock); + return 0; +} + +static int adf_handle_response(struct adf_etr_ring_data *ring) +{ + uint32_t msg_counter = 0; + uint32_t *msg = (uint32_t *)(ring->base_addr + ring->head); + + while (*msg != ADF_RING_EMPTY_SIG) { + ring->callback((uint32_t *)msg); + *msg = ADF_RING_EMPTY_SIG; + ring->head = adf_modulo(ring->head + + ADF_MSG_SIZE_TO_BYTES(ring->msg_size), + ADF_RING_SIZE_MODULO(ring->ring_size)); + msg_counter++; + msg = (uint32_t *)(ring->base_addr + ring->head); + } + if (msg_counter > 0) { + WRITE_CSR_RING_HEAD(ring->bank->csr_addr, + ring->bank->bank_number, + ring->ring_number, ring->head); + atomic_sub(msg_counter, ring->inflights); + } + return 0; +} + +static void adf_configure_tx_ring(struct adf_etr_ring_data *ring) +{ + uint32_t ring_config = BUILD_RING_CONFIG(ring->ring_size); + + WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number, + ring->ring_number, ring_config); +} + +static void adf_configure_rx_ring(struct adf_etr_ring_data *ring) +{ + uint32_t ring_config = + BUILD_RESP_RING_CONFIG(ring->ring_size, + ADF_RING_NEAR_WATERMARK_512, + ADF_RING_NEAR_WATERMARK_0); + + WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number, + ring->ring_number, ring_config); +} + +static int adf_init_ring(struct adf_etr_ring_data *ring) +{ + struct adf_etr_bank_data *bank = ring->bank; + struct adf_accel_dev *accel_dev = bank->accel_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + uint64_t ring_base; + uint32_t ring_size_bytes = + ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size); + + ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes); + ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev), + ring_size_bytes, &ring->dma_addr, + GFP_KERNEL); + if (!ring->base_addr) + return -ENOMEM; + + memset(ring->base_addr, 0x7F, ring_size_bytes); + /* The base_addr has to be aligned to the size of the buffer */ + if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) { + pr_err("QAT: Ring address not aligned\n"); + dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes, + ring->base_addr, ring->dma_addr); + return -EFAULT; + } + + if (hw_data->tx_rings_mask & (1 << ring->ring_number)) + adf_configure_tx_ring(ring); + + else + adf_configure_rx_ring(ring); + + ring_base = BUILD_RING_BASE_ADDR(ring->dma_addr, ring->ring_size); + WRITE_CSR_RING_BASE(ring->bank->csr_addr, ring->bank->bank_number, + ring->ring_number, ring_base); + spin_lock_init(&ring->lock); + return 0; +} + +static void adf_cleanup_ring(struct adf_etr_ring_data *ring) +{ + uint32_t ring_size_bytes = + ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size); + ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes); + + if (ring->base_addr) { + memset(ring->base_addr, 0x7F, ring_size_bytes); + dma_free_coherent(&GET_DEV(ring->bank->accel_dev), + ring_size_bytes, ring->base_addr, + ring->dma_addr); + } +} + +int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, + uint32_t bank_num, uint32_t num_msgs, + uint32_t msg_size, const char *ring_name, + adf_callback_fn callback, int poll_mode, + struct adf_etr_ring_data **ring_ptr) +{ + struct adf_etr_data *transport_data = accel_dev->transport; + struct adf_etr_bank_data *bank; + struct adf_etr_ring_data *ring; + char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; + uint32_t ring_num; + int ret; + + if (bank_num >= GET_MAX_BANKS(accel_dev)) { + pr_err("QAT: Invalid bank number\n"); + return -EFAULT; + } + if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) { + pr_err("QAT: Invalid msg size\n"); + 
return -EFAULT; + } + if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs), + ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) { + pr_err("QAT: Invalid ring size for given msg size\n"); + return -EFAULT; + } + if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) { + pr_err("QAT: Section %s, no such entry : %s\n", + section, ring_name); + return -EFAULT; + } + if (kstrtouint(val, 10, &ring_num)) { + pr_err("QAT: Can't get ring number\n"); + return -EFAULT; + } + + bank = &transport_data->banks[bank_num]; + if (adf_reserve_ring(bank, ring_num)) { + pr_err("QAT: Ring %d, %s already exists.\n", + ring_num, ring_name); + return -EFAULT; + } + ring = &bank->rings[ring_num]; + ring->ring_number = ring_num; + ring->bank = bank; + ring->callback = callback; + ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size); + ring->ring_size = adf_verify_ring_size(msg_size, num_msgs); + ring->head = 0; + ring->tail = 0; + atomic_set(ring->inflights, 0); + ret = adf_init_ring(ring); + if (ret) + goto err; + + /* Enable HW arbitration for the given ring */ + accel_dev->hw_device->hw_arb_ring_enable(ring); + + if (adf_ring_debugfs_add(ring, ring_name)) { + pr_err("QAT: Couldn't add ring debugfs entry\n"); + ret = -EFAULT; + goto err; + } + + /* Enable interrupts if needed */ + if (callback && (!poll_mode)) + adf_enable_ring_irq(bank, ring->ring_number); + *ring_ptr = ring; + return 0; +err: + adf_cleanup_ring(ring); + adf_unreserve_ring(bank, ring_num); + accel_dev->hw_device->hw_arb_ring_disable(ring); + return ret; +} + +void adf_remove_ring(struct adf_etr_ring_data *ring) +{ + struct adf_etr_bank_data *bank = ring->bank; + struct adf_accel_dev *accel_dev = bank->accel_dev; + + /* Disable interrupts for the given ring */ + adf_disable_ring_irq(bank, ring->ring_number); + + /* Clear PCI config space */ + WRITE_CSR_RING_CONFIG(bank->csr_addr, bank->bank_number, + ring->ring_number, 0); + WRITE_CSR_RING_BASE(bank->csr_addr, bank->bank_number, + ring->ring_number, 0); + adf_ring_debugfs_rm(ring); + adf_unreserve_ring(bank, ring->ring_number); + /* Disable HW arbitration for the given ring */ + accel_dev->hw_device->hw_arb_ring_disable(ring); + adf_cleanup_ring(ring); +} + +static void adf_ring_response_handler(struct adf_etr_bank_data *bank) +{ + uint32_t empty_rings, i; + + empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number); + empty_rings = ~empty_rings & bank->irq_mask; + + for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; ++i) { + if (empty_rings & (1 << i)) + adf_handle_response(&bank->rings[i]); + } +} + +/** + * adf_response_handler() - Bottom half handler response handler + * @bank_addr: Address of a ring bank for with the BH was scheduled. + * + * Function is the bottom half handler for the response from acceleration + * device. There is one handler for every ring bank. Function checks all + * communication rings in the bank. + * To be used by QAT device specific drivers. 
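+ *
+ * Example (an illustrative sketch only; the per-bank tasklet variable
+ * shown here is assumed, it is not defined in this file): a device
+ * specific driver would typically bind the handler to each bank with:
+ *
+ * tasklet_init(&bank_tasklet, adf_response_handler,
+ * (unsigned long)bank);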
+
+/**
+ * adf_response_handler() - Bottom half response handler
+ * @bank_addr: Address of the ring bank for which the BH was scheduled.
+ *
+ * Function is the bottom half handler for responses from the
+ * acceleration device. There is one handler for every ring bank.
+ * Function checks all communication rings in the bank.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_response_handler(unsigned long bank_addr)
+{
+	struct adf_etr_bank_data *bank = (void *)bank_addr;
+
+	/* Handle all the responses and re-enable IRQs */
+	adf_ring_response_handler(bank);
+	WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
+				   bank->irq_mask);
+}
+EXPORT_SYMBOL_GPL(adf_response_handler);
+
+static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
+				  const char *section, const char *format,
+				  uint32_t key, uint32_t *value)
+{
+	char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+	char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+
+	snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);
+
+	if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
+		return -EFAULT;
+
+	if (kstrtouint(val_buf, 10, value))
+		return -EFAULT;
+	return 0;
+}
+
+static void adf_enable_coalesc(struct adf_etr_bank_data *bank,
+			       const char *section, uint32_t bank_num_in_accel)
+{
+	if (adf_get_cfg_int(bank->accel_dev, section,
+			    ADF_ETRMGR_COALESCE_TIMER_FORMAT,
+			    bank_num_in_accel, &bank->irq_coalesc_timer))
+		bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
+
+	if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer ||
+	    ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer)
+		bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
+}
+
+static int adf_init_bank(struct adf_accel_dev *accel_dev,
+			 struct adf_etr_bank_data *bank,
+			 uint32_t bank_num, void __iomem *csr_addr)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_etr_ring_data *ring;
+	struct adf_etr_ring_data *tx_ring;
+	uint32_t i, coalesc_enabled;
+
+	memset(bank, 0, sizeof(*bank));
+	bank->bank_number = bank_num;
+	bank->csr_addr = csr_addr;
+	bank->accel_dev = accel_dev;
+	spin_lock_init(&bank->lock);
+
+	/* Always enable IRQ coalescing, so that the optimised flag and
+	 * coalesce register can be used. If it is disabled in the config
+	 * file, just use the minimum time value. */
+	if (adf_get_cfg_int(accel_dev, "Accelerator0",
+			    ADF_ETRMGR_COALESCING_ENABLED_FORMAT,
+			    bank_num, &coalesc_enabled) && coalesc_enabled)
+		adf_enable_coalesc(bank, "Accelerator0", bank_num);
+	else
+		bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;
+
+	for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
+		WRITE_CSR_RING_CONFIG(csr_addr, bank_num, i, 0);
+		WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
+		ring = &bank->rings[i];
+		if (hw_data->tx_rings_mask & (1 << i)) {
+			ring->inflights = kzalloc_node(sizeof(atomic_t),
+						       GFP_KERNEL,
+						       accel_dev->numa_node);
+			if (!ring->inflights)
+				goto err;
+		} else {
+			if (i < hw_data->tx_rx_gap) {
+				pr_err("QAT: Invalid tx rings mask config\n");
+				goto err;
+			}
+			tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
+			ring->inflights = tx_ring->inflights;
+		}
+	}
+	if (adf_bank_debugfs_add(bank)) {
+		pr_err("QAT: Failed to add bank debugfs entry\n");
+		goto err;
+	}
+
+	WRITE_CSR_INT_SRCSEL(csr_addr, bank_num);
+	return 0;
+err:
+	for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
+		ring = &bank->rings[i];
+		if (hw_data->tx_rings_mask & (1 << i) && ring->inflights)
+			kfree(ring->inflights);
+	}
+	return -ENOMEM;
+}
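
adf_init_bank() above ties each response ring to the request ring sitting tx_rx_gap slots below it by sharing a single inflights counter: adf_send_message() increments it and adf_handle_response() decrements it, bounding how many requests can be outstanding per ring pair. A userspace sketch of just the pairing rule; the mask and gap values are illustrative (the real ones come from adf_hw_device_data):

	#include <stdio.h>

	#define NUM_RINGS 16

	int main(void)
	{
		int counters[NUM_RINGS] = { 0 };
		int *inflights[NUM_RINGS];
		unsigned int tx_rings_mask = 0x00FF;	/* illustrative */
		unsigned int tx_rx_gap = 8;		/* illustrative */
		unsigned int i;

		/* tx rings get their own counter, rx rings reuse their partner's */
		for (i = 0; i < NUM_RINGS; i++)
			inflights[i] = (tx_rings_mask & (1u << i)) ?
					&counters[i] : inflights[i - tx_rx_gap];

		(*inflights[0])++;	/* request queued on tx ring 0 */
		(*inflights[8])--;	/* response consumed on rx ring 8 */
		printf("outstanding on pair 0/8: %d\n", *inflights[0]); /* 0 */
		return 0;
	}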
+
+/**
+ * adf_init_etr_data() - Initialize transport rings for acceleration device
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function initializes the communication channels (rings) to the
+ * acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_init_etr_data(struct adf_accel_dev *accel_dev)
+{
+	struct adf_etr_data *etr_data;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	void __iomem *csr_addr;
+	uint32_t size;
+	uint32_t num_banks = 0;
+	int i, ret;
+
+	etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
+				accel_dev->numa_node);
+	if (!etr_data)
+		return -ENOMEM;
+
+	num_banks = GET_MAX_BANKS(accel_dev);
+	size = num_banks * sizeof(struct adf_etr_bank_data);
+	etr_data->banks = kzalloc_node(size, GFP_KERNEL, accel_dev->numa_node);
+	if (!etr_data->banks) {
+		ret = -ENOMEM;
+		goto err_bank;
+	}
+
+	accel_dev->transport = etr_data;
+	i = hw_data->get_etr_bar_id(hw_data);
+	csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;
+
+	/* accel_dev->debugfs_dir should always be non-NULL here */
+	etr_data->debug = debugfs_create_dir("transport",
+					     accel_dev->debugfs_dir);
+	if (!etr_data->debug) {
+		pr_err("QAT: Unable to create transport debugfs entry\n");
+		ret = -ENOENT;
+		goto err_bank_debug;
+	}
+
+	for (i = 0; i < num_banks; i++) {
+		ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
+				    csr_addr);
+		if (ret)
+			goto err_bank_all;
+	}
+
+	return 0;
+
+err_bank_all:
+	debugfs_remove(etr_data->debug);
+err_bank_debug:
+	kfree(etr_data->banks);
+err_bank:
+	kfree(etr_data);
+	accel_dev->transport = NULL;
+	return ret;
+}
+EXPORT_SYMBOL_GPL(adf_init_etr_data);
+
+static void cleanup_bank(struct adf_etr_bank_data *bank)
+{
+	uint32_t i;
+
+	for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
+		struct adf_accel_dev *accel_dev = bank->accel_dev;
+		struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+		struct adf_etr_ring_data *ring = &bank->rings[i];
+
+		if (bank->ring_mask & (1 << i))
+			adf_cleanup_ring(ring);
+
+		if (hw_data->tx_rings_mask & (1 << i))
+			kfree(ring->inflights);
+	}
+	adf_bank_debugfs_rm(bank);
+	memset(bank, 0, sizeof(*bank));
+}
+
+static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
+{
+	struct adf_etr_data *etr_data = accel_dev->transport;
+	uint32_t i, num_banks = GET_MAX_BANKS(accel_dev);
+
+	for (i = 0; i < num_banks; i++)
+		cleanup_bank(&etr_data->banks[i]);
+}
+
+/**
+ * adf_cleanup_etr_data() - Clear transport rings for acceleration device
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function clears the communication channels (rings) of the
+ * acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
+{
+	struct adf_etr_data *etr_data = accel_dev->transport;
+
+	if (etr_data) {
+		adf_cleanup_etr_handles(accel_dev);
+		debugfs_remove(etr_data->debug);
+		kfree(etr_data->banks);
+		kfree(etr_data);
+		accel_dev->transport = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(adf_cleanup_etr_data);
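
The error path of adf_init_etr_data() above is the usual kernel unwind ladder: each label frees exactly what was acquired before the failing step, in reverse order, and accel_dev->transport is reset so a later cleanup cannot trip over a half-initialized pointer. A generic standalone rendering of the pattern (names are illustrative, not driver API):

	#include <stdio.h>
	#include <stdlib.h>

	static int init_example(int fail_at_debugfs)
	{
		void *etr, *banks;

		etr = malloc(16);		/* like kzalloc_node(etr_data) */
		if (!etr)
			return -1;
		banks = malloc(16);		/* like kzalloc_node(banks) */
		if (!banks)
			goto err_bank;
		if (fail_at_debugfs)		/* like debugfs_create_dir() failing */
			goto err_bank_debug;
		free(banks);			/* success path for the demo */
		free(etr);
		return 0;
	err_bank_debug:				/* undo in reverse order */
		free(banks);
	err_bank:
		free(etr);
		return -1;
	}

	int main(void)
	{
		printf("ok=%d fail=%d\n", init_example(0), init_example(1));
		return 0;
	}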
diff --git a/drivers/crypto/qat/qat_common/adf_transport.h b/drivers/crypto/qat/qat_common/adf_transport.h
new file mode 100644
index 000000000000..386485bd9c95
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_transport.h
@@ -0,0 +1,63 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+   * Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in
+     the documentation and/or other materials provided with the
+     distribution.
+   * Neither the name of Intel Corporation nor the names of its
+     contributors may be used to endorse or promote products derived
+     from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_TRANSPORT_H
+#define ADF_TRANSPORT_H
+
+#include "adf_accel_devices.h"
+
+struct adf_etr_ring_data;
+
+typedef void (*adf_callback_fn)(void *resp_msg);
+
+int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
+		    uint32_t bank_num, uint32_t num_msgs, uint32_t msg_size,
+		    const char *ring_name, adf_callback_fn callback,
+		    int poll_mode, struct adf_etr_ring_data **ring_ptr);
+
+int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg);
+void adf_remove_ring(struct adf_etr_ring_data *ring);
+#endif
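
For orientation, a hedged sketch of how a service driver might consume the API that adf_transport.h declares above. It assumes the surrounding kernel driver context; section and ring names, sizes, and the callback are illustrative, and error handling is abbreviated. Per adf_create_ring() in this patch, a ring's IRQ is only enabled when a callback is supplied and poll_mode is zero:

	/* Illustrative fragment, not part of this patch */
	static void example_resp_cb(void *resp_msg)
	{
		/* decode the firmware response message here */
	}

	static int example_setup(struct adf_accel_dev *accel_dev)
	{
		struct adf_etr_ring_data *tx_ring, *rx_ring;
		uint32_t req[16] = { 0 };	/* one 64-byte request */
		int ret;

		/* request ring: no callback, never serviced by the BH */
		ret = adf_create_ring(accel_dev, "SSL", 0, 64, 64,
				      "RingTx", NULL, 0, &tx_ring);
		if (ret)
			return ret;
		/* response ring: interrupt driven (poll_mode == 0) */
		ret = adf_create_ring(accel_dev, "SSL", 0, 64, 64,
				      "RingRx", example_resp_cb, 0, &rx_ring);
		if (ret) {
			adf_remove_ring(tx_ring);
			return ret;
		}
		return adf_send_message(tx_ring, req);
	}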
diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
new file mode 100644
index 000000000000..91d88d676580
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
@@ -0,0 +1,160 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+   * Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in
+     the documentation and/or other materials provided with the
+     distribution.
+   * Neither the name of Intel Corporation nor the names of its
+     contributors may be used to endorse or promote products derived
+     from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
+#define ADF_TRANSPORT_ACCESS_MACROS_H
+
+#include "adf_accel_devices.h"
+#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
+#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
+#define ADF_RING_CSR_RING_CONFIG 0x000
+#define ADF_RING_CSR_RING_LBASE 0x040
+#define ADF_RING_CSR_RING_UBASE 0x080
+#define ADF_RING_CSR_RING_HEAD 0x0C0
+#define ADF_RING_CSR_RING_TAIL 0x100
+#define ADF_RING_CSR_E_STAT 0x14C
+#define ADF_RING_CSR_INT_SRCSEL 0x174
+#define ADF_RING_CSR_INT_SRCSEL_2 0x178
+#define ADF_RING_CSR_INT_COL_EN 0x17C
+#define ADF_RING_CSR_INT_COL_CTL 0x180
+#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
+#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
+#define ADF_RING_BUNDLE_SIZE 0x1000
+#define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A
+#define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05
+#define ADF_COALESCING_MIN_TIME 0x1FF
+#define ADF_COALESCING_MAX_TIME 0xFFFFF
+#define ADF_COALESCING_DEF_TIME 0x27FF
+#define ADF_RING_NEAR_WATERMARK_512 0x08
+#define ADF_RING_NEAR_WATERMARK_0 0x00
+#define ADF_RING_EMPTY_SIG 0x7F7F7F7F
+
+/* Valid internal ring size values */
+#define ADF_RING_SIZE_128 0x01
+#define ADF_RING_SIZE_256 0x02
+#define ADF_RING_SIZE_512 0x03
+#define ADF_RING_SIZE_4K 0x06
+#define ADF_RING_SIZE_16K 0x08
+#define ADF_RING_SIZE_4M 0x10
+#define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
+#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
+#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
+
+/* Valid internal msg size values */
+#define ADF_MSG_SIZE_32 0x01
+#define ADF_MSG_SIZE_64 0x02
+#define ADF_MSG_SIZE_128 0x04
+#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
+#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128
+
+/* Size to bytes conversion macros for ring and msg size values */
+#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
+#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
+#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
+#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
+
+/* Minimum ring buffer size for memory allocation */
+#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
+	ADF_RING_SIZE_4K : SIZE)
+#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
+#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
+	((((1 << (RING_SIZE - 1)) << 4) >> MSG_SIZE) - 1)
+#define BUILD_RING_CONFIG(size)	\
+	((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \
+	| (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+	| size)
+#define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \
+	((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM) \
+	| (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+	| size)
+#define BUILD_RING_BASE_ADDR(addr, size) \
+	((addr >> 6) & (0xFFFFFFFFFFFFFFFFULL << size))
+#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
+	ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		   ADF_RING_CSR_RING_HEAD + (ring << 2))
+#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
+	ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		   ADF_RING_CSR_RING_TAIL + (ring << 2))
+#define READ_CSR_E_STAT(csr_base_addr, bank) \
+	ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		   ADF_RING_CSR_E_STAT)
+#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		   ADF_RING_CSR_RING_CONFIG + (ring << 2), value)
+#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
+do { \
+	uint32_t l_base = 0, u_base = 0; \
+	l_base = (uint32_t)(value & 0xFFFFFFFF); \
+	u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		   ADF_RING_CSR_RING_LBASE + (ring << 2), l_base); \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		   ADF_RING_CSR_RING_UBASE + (ring << 2), u_base); \
+} while (0)
+#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		   ADF_RING_CSR_RING_HEAD + (ring << 2), value)
+#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		   ADF_RING_CSR_RING_TAIL + (ring << 2), value)
+#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
+do { \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		   ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		   ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
+} while (0)
+#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		   ADF_RING_CSR_INT_COL_EN, value)
+#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		   ADF_RING_CSR_INT_COL_CTL, \
+		   ADF_RING_CSR_INT_COL_CTL_ENABLE | value)
+#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		   ADF_RING_CSR_INT_FLAG_AND_COL, value)
+#endif
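
Two of the macros above deserve a worked example. BUILD_RING_BASE_ADDR() converts a DMA address into the CSR encoding by shifting right 6 bits (the hardware addresses rings in 64-byte units) and masking off the low bits implied by the ring size; WRITE_CSR_RING_BASE() then splits that value into the 32-bit LBASE/UBASE writes. A standalone check using the macro verbatim and an illustrative address:

	#include <stdio.h>
	#include <stdint.h>

	/* Copied from adf_transport_access_macros.h above */
	#define BUILD_RING_BASE_ADDR(addr, size) \
		((addr >> 6) & (0xFFFFFFFFFFFFFFFFULL << size))

	int main(void)
	{
		uint64_t dma_addr = 0x12345678F000ULL;	/* illustrative, aligned */
		uint32_t ring_size = 0x08;		/* ADF_RING_SIZE_16K */
		uint64_t ring_base = BUILD_RING_BASE_ADDR(dma_addr, ring_size);

		/* the two halves WRITE_CSR_RING_BASE would program */
		uint32_t l_base = (uint32_t)(ring_base & 0xFFFFFFFF);
		uint32_t u_base = (uint32_t)(ring_base >> 32);

		printf("csr %#llx -> lbase %#x ubase %#x\n",
		       (unsigned long long)ring_base, l_base, u_base);
		return 0;
	}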
diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c
new file mode 100644
index 000000000000..d83a581a9135
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c
@@ -0,0 +1,301 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+   * Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in
+     the documentation and/or other materials provided with the
+     distribution.
+   * Neither the name of Intel Corporation nor the names of its
+     contributors may be used to endorse or promote products derived
+     from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
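
The seq_file iterators in adf_transport_debug.c below treat *pos as a message index: position zero yields SEQ_START_TOKEN for the header block, and iteration stops once pos reaches ring bytes divided by message bytes. A standalone sketch of the offset math, with the conversion macros copied from this patch:

	#include <stdio.h>
	#include <stdint.h>

	/* Copied from adf_transport_access_macros.h in this patch */
	#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
	#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)

	int main(void)
	{
		uint32_t ring_size = 0x02;	/* ADF_RING_SIZE_256: 256 bytes */
		uint32_t msg_size = 0x02;	/* ADF_MSG_SIZE_64: 64 bytes */
		uint32_t limit = ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring_size) /
				 ADF_MSG_SIZE_TO_BYTES(msg_size);
		uint32_t pos;

		for (pos = 0; pos < limit; pos++)	/* four messages */
			printf("msg %u at base_addr + %u\n", pos,
			       ADF_MSG_SIZE_TO_BYTES(msg_size) * pos);
		return 0;
	}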
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include "adf_accel_devices.h"
+#include "adf_transport_internal.h"
+#include "adf_transport_access_macros.h"
+
+static DEFINE_MUTEX(ring_read_lock);
+static DEFINE_MUTEX(bank_read_lock);
+
+static void *adf_ring_start(struct seq_file *sfile, loff_t *pos)
+{
+	struct adf_etr_ring_data *ring = sfile->private;
+
+	mutex_lock(&ring_read_lock);
+	if (*pos == 0)
+		return SEQ_START_TOKEN;
+
+	if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
+		     ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+		return NULL;
+
+	return ring->base_addr +
+		(ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
+}
+
+static void *adf_ring_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+	struct adf_etr_ring_data *ring = sfile->private;
+
+	if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
+		     ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+		return NULL;
+
+	return ring->base_addr +
+		(ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
+}
+
+static int adf_ring_show(struct seq_file *sfile, void *v)
+{
+	struct adf_etr_ring_data *ring = sfile->private;
+	struct adf_etr_bank_data *bank = ring->bank;
+	uint32_t *msg = v;
+	void __iomem *csr = ring->bank->csr_addr;
+	int i, x;
+
+	if (v == SEQ_START_TOKEN) {
+		int head, tail, empty;
+
+		head = READ_CSR_RING_HEAD(csr, bank->bank_number,
+					  ring->ring_number);
+		tail = READ_CSR_RING_TAIL(csr, bank->bank_number,
+					  ring->ring_number);
+		empty = READ_CSR_E_STAT(csr, bank->bank_number);
+
+		seq_puts(sfile, "------- Ring configuration -------\n");
+		seq_printf(sfile, "ring num %d, bank num %d\n",
+			   ring->ring_number, ring->bank->bank_number);
+		seq_printf(sfile, "head %x, tail %x, empty: %d\n",
+			   head, tail, (empty & 1 << ring->ring_number)
+			   >> ring->ring_number);
+		seq_printf(sfile, "ring size %d, msg size %d\n",
+			   ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size),
+			   ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
+		seq_puts(sfile, "----------- Ring data ------------\n");
+		return 0;
+	}
+	seq_printf(sfile, "%p:", msg);
+	x = 0;
+	i = 0;
+	for (; i < (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) >> 2); i++) {
+		seq_printf(sfile, " %08X", *(msg + i));
+		if ((ADF_MSG_SIZE_TO_BYTES(ring->msg_size) >> 2) != i + 1 &&
+		    (++x == 8)) {
+			seq_printf(sfile, "\n%p:", msg + i + 1);
+			x = 0;
+		}
+	}
+	seq_puts(sfile, "\n");
+	return 0;
+}
+
+static void adf_ring_stop(struct seq_file *sfile, void *v)
+{
+	mutex_unlock(&ring_read_lock);
+}
+
+static const struct seq_operations adf_ring_sops = {
+	.start = adf_ring_start,
+	.next = adf_ring_next,
+	.stop = adf_ring_stop,
+	.show = adf_ring_show
+};
+
+static int adf_ring_open(struct inode *inode, struct file *file)
+{
+	int ret = seq_open(file, &adf_ring_sops);
+
+	if (!ret) {
+		struct seq_file *seq_f = file->private_data;
+
+		seq_f->private = inode->i_private;
+	}
+	return ret;
+}
+
+static const struct file_operations adf_ring_debug_fops = {
+	.open = adf_ring_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release
+};
+
+int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
+{
+	struct adf_etr_ring_debug_entry *ring_debug;
+	char entry_name[8];
+
+	ring_debug = kzalloc(sizeof(*ring_debug), GFP_KERNEL);
+	if (!ring_debug)
+		return -ENOMEM;
+
+	strlcpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name));
+	snprintf(entry_name, sizeof(entry_name), "ring_%02d",
+		 ring->ring_number);
+
+	ring_debug->debug = debugfs_create_file(entry_name, S_IRUSR,
+						ring->bank->bank_debug_dir,
+						ring, &adf_ring_debug_fops);
+	if (!ring_debug->debug) {
+		pr_err("QAT: Failed to create ring debug entry.\n");
+		kfree(ring_debug);
+		return -EFAULT;
+	}
+	ring->ring_debug = ring_debug;
+	return 0;
+}
+
+void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring)
+{
+	if (ring->ring_debug) {
+		debugfs_remove(ring->ring_debug->debug);
+		kfree(ring->ring_debug);
+		ring->ring_debug = NULL;
+	}
+}
+
+static void *adf_bank_start(struct seq_file *sfile, loff_t *pos)
+{
+	mutex_lock(&bank_read_lock);
+	if (*pos == 0)
+		return SEQ_START_TOKEN;
+
+	if (*pos >= ADF_ETR_MAX_RINGS_PER_BANK)
+		return NULL;
+
+	return pos;
+}
+
+static void *adf_bank_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+	if (++(*pos) >= ADF_ETR_MAX_RINGS_PER_BANK)
+		return NULL;
+
+	return pos;
+}
+
+static int adf_bank_show(struct seq_file *sfile, void *v)
+{
+	struct adf_etr_bank_data *bank = sfile->private;
+
+	if (v == SEQ_START_TOKEN) {
+		seq_printf(sfile, "------- Bank %d configuration -------\n",
+			   bank->bank_number);
+	} else {
+		int ring_id = *((int *)v) - 1;
+		struct adf_etr_ring_data *ring = &bank->rings[ring_id];
+		void __iomem *csr = bank->csr_addr;
+		int head, tail, empty;
+
+		if (!(bank->ring_mask & 1 << ring_id))
+			return 0;
+
+		head = READ_CSR_RING_HEAD(csr, bank->bank_number,
+					  ring->ring_number);
+		tail = READ_CSR_RING_TAIL(csr, bank->bank_number,
+					  ring->ring_number);
+		empty = READ_CSR_E_STAT(csr, bank->bank_number);
+
+		seq_printf(sfile,
+			   "ring num %02d, head %04x, tail %04x, empty: %d\n",
+			   ring->ring_number, head, tail,
+			   (empty & 1 << ring->ring_number) >>
+			   ring->ring_number);
+	}
+	return 0;
+}
+
+static void adf_bank_stop(struct seq_file *sfile, void *v)
+{
+	mutex_unlock(&bank_read_lock);
+}
+
+static const struct seq_operations adf_bank_sops = {
+	.start = adf_bank_start,
+	.next = adf_bank_next,
+	.stop = adf_bank_stop,
+	.show = adf_bank_show
+};
+
+static int adf_bank_open(struct inode *inode, struct file *file)
+{
+	int ret = seq_open(file, &adf_bank_sops);
+
+	if (!ret) {
+		struct seq_file *seq_f = file->private_data;
+
+		seq_f->private = inode->i_private;
+	}
+	return ret;
+}
+
+static const struct file_operations adf_bank_debug_fops = {
+	.open = adf_bank_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release
+};
+
+int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
+{
+	struct adf_accel_dev *accel_dev = bank->accel_dev;
+	struct dentry *parent = accel_dev->transport->debug;
+	char name[8];
+
+	snprintf(name, sizeof(name), "bank_%02d", bank->bank_number);
+	bank->bank_debug_dir = debugfs_create_dir(name, parent);
+	if (!bank->bank_debug_dir) {
+		pr_err("QAT: Failed to create bank debug dir.\n");
+		return -EFAULT;
+	}
+
+	bank->bank_debug_cfg = debugfs_create_file("config", S_IRUSR,
+						   bank->bank_debug_dir, bank,
+						   &adf_bank_debug_fops);
+	if (!bank->bank_debug_cfg) {
+		pr_err("QAT: Failed to create bank debug entry.\n");
+		debugfs_remove(bank->bank_debug_dir);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank)
+{
+	debugfs_remove(bank->bank_debug_cfg);
+	debugfs_remove(bank->bank_debug_dir);
+}
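
Taken together, adf_init_etr_data(), adf_bank_debugfs_add() and adf_ring_debugfs_add() produce a debugfs layout along the following lines; the device directory comes from accel_dev->debugfs_dir, which is created outside this patch, and ring entries appear only for rings registered through adf_create_ring():

	<debugfs>/<qat device>/transport/
		bank_00/
			config		bank summary (adf_bank_debug_fops)
			ring_00		per-ring dump (adf_ring_debug_fops)
			ring_08
		bank_01/
		...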
diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h
new file mode 100644
index 000000000000..4eb8969243bf
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h
@@ -0,0 +1,115 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+   * Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in
+     the documentation and/or other materials provided with the
+     distribution.
+   * Neither the name of Intel Corporation nor the names of its
+     contributors may be used to endorse or promote products derived
+     from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
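
One detail worth noting in the header that follows: when CONFIG_DEBUG_FS is disabled, the debugfs hooks compile down to nothing, so callers such as adf_init_bank() need no #ifdefs. The add() helpers become static inline stubs returning 0 and the rm() helpers become empty statements. A minimal standalone rendering of the same pattern, with MY_DEBUG_FS standing in for CONFIG_DEBUG_FS:

	#include <stdio.h>

	#ifdef MY_DEBUG_FS
	int my_debugfs_add(void);		/* real implementation elsewhere */
	void my_debugfs_rm(void);
	#else
	static inline int my_debugfs_add(void) { return 0; }
	#define my_debugfs_rm() do {} while (0)
	#endif

	int main(void)
	{
		if (my_debugfs_add())		/* always succeeds here */
			return 1;
		my_debugfs_rm();		/* expands to nothing */
		printf("built without MY_DEBUG_FS: stubs used\n");
		return 0;
	}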
+#ifndef ADF_TRANSPORT_INTRN_H
+#define ADF_TRANSPORT_INTRN_H
+
+#include <linux/interrupt.h>
+#include <linux/atomic.h>
+#include <linux/spinlock_types.h>
+#include "adf_transport.h"
+
+struct adf_etr_ring_debug_entry {
+	char ring_name[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+	struct dentry *debug;
+};
+
+struct adf_etr_ring_data {
+	void *base_addr;
+	atomic_t *inflights;
+	spinlock_t lock;	/* protects ring data struct */
+	adf_callback_fn callback;
+	struct adf_etr_bank_data *bank;
+	dma_addr_t dma_addr;
+	uint16_t head;
+	uint16_t tail;
+	uint8_t ring_number;
+	uint8_t ring_size;
+	uint8_t msg_size;
+	uint8_t reserved;
+	struct adf_etr_ring_debug_entry *ring_debug;
+} __packed;
+
+struct adf_etr_bank_data {
+	struct adf_etr_ring_data rings[ADF_ETR_MAX_RINGS_PER_BANK];
+	struct tasklet_struct resp_hanlder;
+	void __iomem *csr_addr;
+	struct adf_accel_dev *accel_dev;
+	uint32_t irq_coalesc_timer;
+	uint16_t ring_mask;
+	uint16_t irq_mask;
+	spinlock_t lock;	/* protects bank data struct */
+	struct dentry *bank_debug_dir;
+	struct dentry *bank_debug_cfg;
+	uint32_t bank_number;
+} __packed;
+
+struct adf_etr_data {
+	struct adf_etr_bank_data *banks;
+	struct dentry *debug;
+};
+
+void adf_response_handler(unsigned long bank_addr);
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+int adf_bank_debugfs_add(struct adf_etr_bank_data *bank);
+void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank);
+int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name);
+void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring);
+#else
+static inline int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
+{
+	return 0;
+}
+#define adf_bank_debugfs_rm(bank) do {} while (0)
+static inline int adf_ring_debugfs_add(struct adf_etr_ring_data *ring,
+				       const char *name)
+{
+	return 0;
+}
+#define adf_ring_debugfs_rm(ring) do {} while (0)
+#endif
+#endif
--
cgit v1.2.3


From 38154e65ce258196c411fc5385eaa13c2fa0eab2 Mon Sep 17 00:00:00 2001
From: Tadeusz Struk
Date: Thu, 5 Jun 2014 13:43:15 -0700
Subject: crypto: qat - Intel(R) QAT FW interface

This patch adds FW interface structure definitions.

Acked-by: John Griffin
Reviewed-by: Bruce W. Allan
Signed-off-by: Tadeusz Struk
Signed-off-by: Herbert Xu
---
 drivers/crypto/qat/qat_common/icp_qat_fw.h         | 316 ++++++++++++++++
 .../crypto/qat/qat_common/icp_qat_fw_init_admin.h  | 131 +++++++
 drivers/crypto/qat/qat_common/icp_qat_fw_la.h      | 403 +++++++++++++++++++++
 drivers/crypto/qat/qat_common/icp_qat_hw.h         | 305 ++++++++++++++++
 4 files changed, 1155 insertions(+)
 create mode 100644 drivers/crypto/qat/qat_common/icp_qat_fw.h
 create mode 100644 drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
 create mode 100644 drivers/crypto/qat/qat_common/icp_qat_fw_la.h
 create mode 100644 drivers/crypto/qat/qat_common/icp_qat_hw.h

(limited to 'drivers/crypto')
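
The firmware headers added by this commit are fixed wire formats. In icp_qat_fw.h below, the common request is ICP_QAT_FW_REQ_DEFAULT_SZ (128) bytes and the common response ICP_QAT_FW_RESP_DEFAULT_SZ (32) bytes, which is what lets the transport code size ring slots purely from the msg size codes. A standalone C11 size check of the response layout, with the structs copied from the header (natural alignment assumed):

	#include <stdio.h>
	#include <stdint.h>
	#include <assert.h>

	/* Copied from icp_qat_fw.h in this patch */
	struct icp_qat_fw_comn_error {
		uint8_t xlat_err_code;
		uint8_t cmp_err_code;
	};

	struct icp_qat_fw_comn_resp_hdr {
		uint8_t resrvd1;
		uint8_t service_id;
		uint8_t response_type;
		uint8_t hdr_flags;
		struct icp_qat_fw_comn_error comn_error;
		uint8_t comn_status;
		uint8_t cmd_id;
	};

	struct icp_qat_fw_comn_resp {
		struct icp_qat_fw_comn_resp_hdr comn_hdr;
		uint64_t opaque_data;
		uint32_t resrvd[4];	/* ICP_QAT_FW_NUM_LONGWORDS_4 */
	};

	int main(void)
	{
		/* 8-byte header + 8-byte opaque_data + 16 reserved = 32 */
		static_assert(sizeof(struct icp_qat_fw_comn_resp) == 32,
			      "response must be ICP_QAT_FW_RESP_DEFAULT_SZ");
		printf("comn_resp is %zu bytes\n",
		       sizeof(struct icp_qat_fw_comn_resp));
		return 0;
	}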
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw.h b/drivers/crypto/qat/qat_common/icp_qat_fw.h
new file mode 100644
index 000000000000..f1e30e24a419
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw.h
@@ -0,0 +1,316 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+   * Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in
+     the documentation and/or other materials provided with the
+     distribution.
+   * Neither the name of Intel Corporation nor the names of its
+     contributors may be used to endorse or promote products derived
+     from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/ +#ifndef _ICP_QAT_FW_H_ +#define _ICP_QAT_FW_H_ +#include +#include "icp_qat_hw.h" + +#define QAT_FIELD_SET(flags, val, bitpos, mask) \ +{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \ + (((val) & (mask)) << (bitpos))) ; } + +#define QAT_FIELD_GET(flags, bitpos, mask) \ + (((flags) >> (bitpos)) & (mask)) + +#define ICP_QAT_FW_REQ_DEFAULT_SZ 128 +#define ICP_QAT_FW_RESP_DEFAULT_SZ 32 +#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8 +#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF +#define ICP_QAT_FW_NUM_LONGWORDS_1 1 +#define ICP_QAT_FW_NUM_LONGWORDS_2 2 +#define ICP_QAT_FW_NUM_LONGWORDS_3 3 +#define ICP_QAT_FW_NUM_LONGWORDS_4 4 +#define ICP_QAT_FW_NUM_LONGWORDS_5 5 +#define ICP_QAT_FW_NUM_LONGWORDS_6 6 +#define ICP_QAT_FW_NUM_LONGWORDS_7 7 +#define ICP_QAT_FW_NUM_LONGWORDS_10 10 +#define ICP_QAT_FW_NUM_LONGWORDS_13 13 +#define ICP_QAT_FW_NULL_REQ_SERV_ID 1 + +enum icp_qat_fw_comn_resp_serv_id { + ICP_QAT_FW_COMN_RESP_SERV_NULL, + ICP_QAT_FW_COMN_RESP_SERV_CPM_FW, + ICP_QAT_FW_COMN_RESP_SERV_DELIMITER +}; + +enum icp_qat_fw_comn_request_id { + ICP_QAT_FW_COMN_REQ_NULL = 0, + ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3, + ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4, + ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7, + ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9, + ICP_QAT_FW_COMN_REQ_DELIMITER +}; + +struct icp_qat_fw_comn_req_hdr_cd_pars { + union { + struct { + uint64_t content_desc_addr; + uint16_t content_desc_resrvd1; + uint8_t content_desc_params_sz; + uint8_t content_desc_hdr_resrvd2; + uint32_t content_desc_resrvd3; + } s; + struct { + uint32_t serv_specif_fields[4]; + } s1; + } u; +}; + +struct icp_qat_fw_comn_req_mid { + uint64_t opaque_data; + uint64_t src_data_addr; + uint64_t dest_data_addr; + uint32_t src_length; + uint32_t dst_length; +}; + +struct icp_qat_fw_comn_req_cd_ctrl { + uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5]; +}; + +struct icp_qat_fw_comn_req_hdr { + uint8_t resrvd1; + uint8_t service_cmd_id; + uint8_t service_type; + uint8_t hdr_flags; + uint16_t serv_specif_flags; + uint16_t comn_req_flags; +}; + +struct icp_qat_fw_comn_req_rqpars { + uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13]; +}; + +struct icp_qat_fw_comn_req { + struct icp_qat_fw_comn_req_hdr comn_hdr; + struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars; + struct icp_qat_fw_comn_req_mid comn_mid; + struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars; + struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl; +}; + +struct icp_qat_fw_comn_error { + uint8_t xlat_err_code; + uint8_t cmp_err_code; +}; + +struct icp_qat_fw_comn_resp_hdr { + uint8_t resrvd1; + uint8_t service_id; + uint8_t response_type; + uint8_t hdr_flags; + struct icp_qat_fw_comn_error comn_error; + uint8_t comn_status; + uint8_t cmd_id; +}; + +struct icp_qat_fw_comn_resp { + struct icp_qat_fw_comn_resp_hdr comn_hdr; + uint64_t opaque_data; + uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; +}; + +#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1 +#define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0 +#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7 +#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1 +#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F + +#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \ + icp_qat_fw_comn_req_hdr_t.service_type + +#define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \ + icp_qat_fw_comn_req_hdr_t.service_type = val + +#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \ + icp_qat_fw_comn_req_hdr_t.service_cmd_id + +#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \ + 
icp_qat_fw_comn_req_hdr_t.service_cmd_id = val + +#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \ + ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags) + +#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \ + ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) + +#define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \ + QAT_FIELD_GET(hdr_flags, \ + ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \ + ICP_QAT_FW_COMN_VALID_FLAG_MASK) + +#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \ + (hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK) + +#define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \ + QAT_FIELD_SET((hdr_t.hdr_flags), (val), \ + ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \ + ICP_QAT_FW_COMN_VALID_FLAG_MASK) + +#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \ + (((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \ + ICP_QAT_FW_COMN_VALID_FLAG_BITPOS) + +#define QAT_COMN_PTR_TYPE_BITPOS 0 +#define QAT_COMN_PTR_TYPE_MASK 0x1 +#define QAT_COMN_CD_FLD_TYPE_BITPOS 1 +#define QAT_COMN_CD_FLD_TYPE_MASK 0x1 +#define QAT_COMN_PTR_TYPE_FLAT 0x0 +#define QAT_COMN_PTR_TYPE_SGL 0x1 +#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0 +#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1 + +#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \ + ((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \ + | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS)) + +#define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \ + QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK) + +#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \ + QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \ + QAT_COMN_CD_FLD_TYPE_MASK) + +#define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \ + QAT_COMN_PTR_TYPE_MASK) + +#define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \ + QAT_COMN_CD_FLD_TYPE_MASK) + +#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4 +#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0 +#define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0 +#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F + +#define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \ + ((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \ + >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) + +#define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ + { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \ + & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ + ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK)); } + +#define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \ + (((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK) + +#define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \ + { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ + ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); } + +#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7 +#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1 +#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5 +#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1 +#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4 +#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1 +#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3 +#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1 + +#define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \ + ((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \ + QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \ + (((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \ + QAT_COMN_RESP_CMP_STATUS_BITPOS) | \ + (((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \ + QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \ + (((eolb) & 
QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \ + QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS)) + +#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \ + QAT_COMN_RESP_CRYPTO_STATUS_MASK) + +#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \ + QAT_COMN_RESP_CMP_STATUS_MASK) + +#define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \ + QAT_COMN_RESP_XLAT_STATUS_MASK) + +#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \ + QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) + +#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0 +#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1 +#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0 +#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1 +#define ERR_CODE_NO_ERROR 0 +#define ERR_CODE_INVALID_BLOCK_TYPE -1 +#define ERR_CODE_NO_MATCH_ONES_COMP -2 +#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3 +#define ERR_CODE_INCOMPLETE_LEN -4 +#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5 +#define ERR_CODE_RPT_GT_SPEC_LEN -6 +#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7 +#define ERR_CODE_INV_DIS_CODE_LEN -8 +#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9 +#define ERR_CODE_DIS_TOO_FAR_BACK -10 +#define ERR_CODE_OVERFLOW_ERROR -11 +#define ERR_CODE_SOFT_ERROR -12 +#define ERR_CODE_FATAL_ERROR -13 +#define ERR_CODE_SSM_ERROR -14 +#define ERR_CODE_ENDPOINT_ERROR -15 + +enum icp_qat_fw_slice { + ICP_QAT_FW_SLICE_NULL = 0, + ICP_QAT_FW_SLICE_CIPHER = 1, + ICP_QAT_FW_SLICE_AUTH = 2, + ICP_QAT_FW_SLICE_DRAM_RD = 3, + ICP_QAT_FW_SLICE_DRAM_WR = 4, + ICP_QAT_FW_SLICE_COMP = 5, + ICP_QAT_FW_SLICE_XLAT = 6, + ICP_QAT_FW_SLICE_DELIMITER +}; +#endif diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h new file mode 100644 index 000000000000..72a59faa9005 --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h @@ -0,0 +1,131 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef _ICP_QAT_FW_INIT_ADMIN_H_ +#define _ICP_QAT_FW_INIT_ADMIN_H_ + +#include "icp_qat_fw.h" + +enum icp_qat_fw_init_admin_cmd_id { + ICP_QAT_FW_INIT_ME = 0, + ICP_QAT_FW_TRNG_ENABLE = 1, + ICP_QAT_FW_TRNG_DISABLE = 2, + ICP_QAT_FW_CONSTANTS_CFG = 3, + ICP_QAT_FW_STATUS_GET = 4, + ICP_QAT_FW_COUNTERS_GET = 5, + ICP_QAT_FW_LOOPBACK = 6, + ICP_QAT_FW_HEARTBEAT_SYNC = 7, + ICP_QAT_FW_HEARTBEAT_GET = 8 +}; + +enum icp_qat_fw_init_admin_resp_status { + ICP_QAT_FW_INIT_RESP_STATUS_SUCCESS = 0, + ICP_QAT_FW_INIT_RESP_STATUS_FAIL +}; + +struct icp_qat_fw_init_admin_req { + uint16_t init_cfg_sz; + uint8_t resrvd1; + uint8_t init_admin_cmd_id; + uint32_t resrvd2; + uint64_t opaque_data; + uint64_t init_cfg_ptr; + uint64_t resrvd3; +}; + +struct icp_qat_fw_init_admin_resp_hdr { + uint8_t flags; + uint8_t resrvd1; + uint8_t status; + uint8_t init_admin_cmd_id; +}; + +struct icp_qat_fw_init_admin_resp_pars { + union { + uint32_t resrvd1[ICP_QAT_FW_NUM_LONGWORDS_4]; + struct { + uint32_t version_patch_num; + uint8_t context_id; + uint8_t ae_id; + uint16_t resrvd1; + uint64_t resrvd2; + } s1; + struct { + uint64_t req_rec_count; + uint64_t resp_sent_count; + } s2; + } u; +}; + +struct icp_qat_fw_init_admin_resp { + struct icp_qat_fw_init_admin_resp_hdr init_resp_hdr; + union { + uint32_t resrvd2; + struct { + uint16_t version_minor_num; + uint16_t version_major_num; + } s; + } u; + uint64_t opaque_data; + struct icp_qat_fw_init_admin_resp_pars init_resp_pars; +}; + +#define ICP_QAT_FW_COMN_HEARTBEAT_OK 0 +#define ICP_QAT_FW_COMN_HEARTBEAT_BLOCKED 1 +#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS 0 +#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK 0x1 +#define ICP_QAT_FW_COMN_STATUS_RESRVD_FLD_MASK 0xFE +#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_GET(hdr_t) \ + ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(hdr_t.flags) + +#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_SET(hdr_t, val) \ + ICP_QAT_FW_COMN_HEARTBEAT_FLAG_SET(hdr_t, val) + +#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(flags) \ + QAT_FIELD_GET(flags, \ + ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS, \ + ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK) +#endif diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h new file mode 100644 index 000000000000..d1d4802a857f --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h @@ -0,0 +1,403 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. 
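
A short worked example for the admin interface above: icp_qat_fw_init_admin_req is a fixed 32-byte message, and the opaque_data value a driver places in it comes back verbatim in icp_qat_fw_init_admin_resp, which is how responses are matched to requests. A standalone C11 size check with the struct copied from the header (natural alignment assumed):

	#include <stdio.h>
	#include <stdint.h>
	#include <assert.h>

	/* Copied from icp_qat_fw_init_admin.h in this patch */
	struct icp_qat_fw_init_admin_req {
		uint16_t init_cfg_sz;
		uint8_t resrvd1;
		uint8_t init_admin_cmd_id;
		uint32_t resrvd2;
		uint64_t opaque_data;
		uint64_t init_cfg_ptr;
		uint64_t resrvd3;
	};

	int main(void)
	{
		struct icp_qat_fw_init_admin_req req = { 0 };

		req.init_admin_cmd_id = 4;	/* ICP_QAT_FW_STATUS_GET */
		req.opaque_data = 0x1234;	/* echoed in the response */
		static_assert(sizeof(req) == 32, "admin request is 32 bytes");
		printf("cmd %u, req %zu bytes\n",
		       req.init_admin_cmd_id, sizeof(req));
		return 0;
	}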
+ + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
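
Like the common-header helpers in icp_qat_fw.h, every flag macro in icp_qat_fw_la.h below is a thin wrapper around QAT_FIELD_SET()/QAT_FIELD_GET(). A standalone check of the mechanics, with the three definitions copied verbatim from icp_qat_fw.h; setting the valid flag places a 1 at bit 7 of hdr_flags:

	#include <stdio.h>
	#include <stdint.h>

	/* Copied from icp_qat_fw.h in this patch */
	#define QAT_FIELD_SET(flags, val, bitpos, mask) \
	{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \
		     (((val) & (mask)) << (bitpos))); }

	#define QAT_FIELD_GET(flags, bitpos, mask) \
		(((flags) >> (bitpos)) & (mask))

	#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7
	#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1

	int main(void)
	{
		uint8_t hdr_flags = 0;

		QAT_FIELD_SET(hdr_flags, 1, ICP_QAT_FW_COMN_VALID_FLAG_BITPOS,
			      ICP_QAT_FW_COMN_VALID_FLAG_MASK);
		printf("hdr_flags %#x, valid %d\n", hdr_flags,	/* 0x80, 1 */
		       QAT_FIELD_GET(hdr_flags,
				     ICP_QAT_FW_COMN_VALID_FLAG_BITPOS,
				     ICP_QAT_FW_COMN_VALID_FLAG_MASK));
		return 0;
	}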
+*/ +#ifndef _ICP_QAT_FW_LA_H_ +#define _ICP_QAT_FW_LA_H_ +#include "icp_qat_fw.h" + +enum icp_qat_fw_la_cmd_id { + ICP_QAT_FW_LA_CMD_CIPHER = 0, + ICP_QAT_FW_LA_CMD_AUTH = 1, + ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2, + ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3, + ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4, + ICP_QAT_FW_LA_CMD_TRNG_TEST = 5, + ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6, + ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7, + ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8, + ICP_QAT_FW_LA_CMD_MGF1 = 9, + ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10, + ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11, + ICP_QAT_FW_LA_CMD_DELIMITER = 12 +}; + +#define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK +#define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR +#define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK +#define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR + +struct icp_qat_fw_la_bulk_req { + struct icp_qat_fw_comn_req_hdr comn_hdr; + struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars; + struct icp_qat_fw_comn_req_mid comn_mid; + struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars; + struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl; +}; + +#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1 +#define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0 +#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12 +#define ICP_QAT_FW_LA_ZUC_3G_PROTO 1 +#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1 +#define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11 +#define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1 +#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1 +#define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0 +#define QAT_LA_DIGEST_IN_BUFFER_BITPOS 10 +#define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1 +#define ICP_QAT_FW_LA_SNOW_3G_PROTO 4 +#define ICP_QAT_FW_LA_GCM_PROTO 2 +#define ICP_QAT_FW_LA_CCM_PROTO 1 +#define ICP_QAT_FW_LA_NO_PROTO 0 +#define QAT_LA_PROTO_BITPOS 7 +#define QAT_LA_PROTO_MASK 0x7 +#define ICP_QAT_FW_LA_CMP_AUTH_RES 1 +#define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0 +#define QAT_LA_CMP_AUTH_RES_BITPOS 6 +#define QAT_LA_CMP_AUTH_RES_MASK 0x1 +#define ICP_QAT_FW_LA_RET_AUTH_RES 1 +#define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0 +#define QAT_LA_RET_AUTH_RES_BITPOS 5 +#define QAT_LA_RET_AUTH_RES_MASK 0x1 +#define ICP_QAT_FW_LA_UPDATE_STATE 1 +#define ICP_QAT_FW_LA_NO_UPDATE_STATE 0 +#define QAT_LA_UPDATE_STATE_BITPOS 4 +#define QAT_LA_UPDATE_STATE_MASK 0x1 +#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0 +#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1 +#define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3 +#define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1 +#define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0 +#define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1 +#define QAT_LA_CIPH_IV_FLD_BITPOS 2 +#define QAT_LA_CIPH_IV_FLD_MASK 0x1 +#define ICP_QAT_FW_LA_PARTIAL_NONE 0 +#define ICP_QAT_FW_LA_PARTIAL_START 1 +#define ICP_QAT_FW_LA_PARTIAL_MID 3 +#define ICP_QAT_FW_LA_PARTIAL_END 2 +#define QAT_LA_PARTIAL_BITPOS 0 +#define QAT_LA_PARTIAL_MASK 0x3 +#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \ + cmp_auth, ret_auth, update_state, \ + ciph_iv, ciphcfg, partial) \ + (((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \ + QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \ + ((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \ + QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \ + ((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \ + QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \ + ((proto & QAT_LA_PROTO_MASK) << \ + QAT_LA_PROTO_BITPOS) | \ + ((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \ + QAT_LA_CMP_AUTH_RES_BITPOS) | \ + ((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \ + QAT_LA_RET_AUTH_RES_BITPOS) | 
\ + ((update_state & QAT_LA_UPDATE_STATE_MASK) << \ + QAT_LA_UPDATE_STATE_BITPOS) | \ + ((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \ + QAT_LA_CIPH_IV_FLD_BITPOS) | \ + ((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \ + QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \ + ((partial & QAT_LA_PARTIAL_MASK) << \ + QAT_LA_PARTIAL_BITPOS)) + +#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \ + QAT_LA_CIPH_IV_FLD_MASK) + +#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \ + QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) + +#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \ + QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \ + QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) + +#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \ + QAT_LA_GCM_IV_LEN_FLAG_MASK) + +#define ICP_QAT_FW_LA_PROTO_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK) + +#define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \ + QAT_LA_CMP_AUTH_RES_MASK) + +#define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \ + QAT_LA_RET_AUTH_RES_MASK) + +#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \ + QAT_LA_DIGEST_IN_BUFFER_MASK) + +#define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \ + QAT_LA_UPDATE_STATE_MASK) + +#define ICP_QAT_FW_LA_PARTIAL_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \ + QAT_LA_PARTIAL_MASK) + +#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \ + QAT_LA_CIPH_IV_FLD_MASK) + +#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \ + QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) + +#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \ + QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) + +#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \ + QAT_LA_GCM_IV_LEN_FLAG_MASK) + +#define ICP_QAT_FW_LA_PROTO_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \ + QAT_LA_PROTO_MASK) + +#define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \ + QAT_LA_CMP_AUTH_RES_MASK) + +#define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \ + QAT_LA_RET_AUTH_RES_MASK) + +#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \ + QAT_LA_DIGEST_IN_BUFFER_MASK) + +#define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \ + QAT_LA_UPDATE_STATE_MASK) + +#define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \ + QAT_LA_PARTIAL_MASK) + +struct icp_qat_fw_cipher_req_hdr_cd_pars { + union { + struct { + uint64_t content_desc_addr; + uint16_t content_desc_resrvd1; + uint8_t content_desc_params_sz; + uint8_t content_desc_hdr_resrvd2; + uint32_t content_desc_resrvd3; + } s; + struct { + uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; + } s1; + } u; +}; + +struct icp_qat_fw_cipher_auth_req_hdr_cd_pars { + union { + struct { + uint64_t content_desc_addr; + 
uint16_t content_desc_resrvd1; + uint8_t content_desc_params_sz; + uint8_t content_desc_hdr_resrvd2; + uint32_t content_desc_resrvd3; + } s; + struct { + uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; + } sl; + } u; +}; + +struct icp_qat_fw_cipher_cd_ctrl_hdr { + uint8_t cipher_state_sz; + uint8_t cipher_key_sz; + uint8_t cipher_cfg_offset; + uint8_t next_curr_id; + uint8_t cipher_padding_sz; + uint8_t resrvd1; + uint16_t resrvd2; + uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3]; +}; + +struct icp_qat_fw_auth_cd_ctrl_hdr { + uint32_t resrvd1; + uint8_t resrvd2; + uint8_t hash_flags; + uint8_t hash_cfg_offset; + uint8_t next_curr_id; + uint8_t resrvd3; + uint8_t outer_prefix_sz; + uint8_t final_sz; + uint8_t inner_res_sz; + uint8_t resrvd4; + uint8_t inner_state1_sz; + uint8_t inner_state2_offset; + uint8_t inner_state2_sz; + uint8_t outer_config_offset; + uint8_t outer_state1_sz; + uint8_t outer_res_sz; + uint8_t outer_prefix_offset; +}; + +struct icp_qat_fw_cipher_auth_cd_ctrl_hdr { + uint8_t cipher_state_sz; + uint8_t cipher_key_sz; + uint8_t cipher_cfg_offset; + uint8_t next_curr_id_cipher; + uint8_t cipher_padding_sz; + uint8_t hash_flags; + uint8_t hash_cfg_offset; + uint8_t next_curr_id_auth; + uint8_t resrvd1; + uint8_t outer_prefix_sz; + uint8_t final_sz; + uint8_t inner_res_sz; + uint8_t resrvd2; + uint8_t inner_state1_sz; + uint8_t inner_state2_offset; + uint8_t inner_state2_sz; + uint8_t outer_config_offset; + uint8_t outer_state1_sz; + uint8_t outer_res_sz; + uint8_t outer_prefix_offset; +}; + +#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1 +#define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0 +#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX 240 +#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \ + (sizeof(struct icp_qat_fw_la_cipher_req_params_t)) +#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0) + +struct icp_qat_fw_la_cipher_req_params { + uint32_t cipher_offset; + uint32_t cipher_length; + union { + uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4]; + struct { + uint64_t cipher_IV_ptr; + uint64_t resrvd1; + } s; + } u; +}; + +struct icp_qat_fw_la_auth_req_params { + uint32_t auth_off; + uint32_t auth_len; + union { + uint64_t auth_partial_st_prefix; + uint64_t aad_adr; + } u1; + uint64_t auth_res_addr; + union { + uint8_t inner_prefix_sz; + uint8_t aad_sz; + } u2; + uint8_t resrvd1; + uint8_t hash_state_sz; + uint8_t auth_res_sz; +} __packed; + +struct icp_qat_fw_la_auth_req_params_resrvd_flds { + uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6]; + union { + uint8_t inner_prefix_sz; + uint8_t aad_sz; + } u2; + uint8_t resrvd1; + uint16_t resrvd2; +}; + +struct icp_qat_fw_la_resp { + struct icp_qat_fw_comn_resp_hdr comn_resp; + uint64_t opaque_data; + uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; +}; +#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \ + ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \ + ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) + +#define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ +{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \ + ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \ + & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ + ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK)) } + +#define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \ + (((cd_ctrl_hdr_t)->next_curr_id_cipher) \ + & ICP_QAT_FW_COMN_CURR_ID_MASK) + +#define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \ +{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \ + ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ + ((val) & 
ICP_QAT_FW_COMN_CURR_ID_MASK)) } + +#define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \ + ((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \ + >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) + +#define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ +{ (cd_ctrl_hdr_t)->next_curr_id_auth = \ + ((((cd_ctrl_hdr_t)->next_curr_id_auth) \ + & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ + ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK)) } + +#define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \ + (((cd_ctrl_hdr_t)->next_curr_id_auth) \ + & ICP_QAT_FW_COMN_CURR_ID_MASK) + +#define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \ +{ (cd_ctrl_hdr_t)->next_curr_id_auth = \ + ((((cd_ctrl_hdr_t)->next_curr_id_auth) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ + ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) } + +#endif diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/qat/qat_common/icp_qat_hw.h new file mode 100644 index 000000000000..cc7ec4071929 --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_hw.h @@ -0,0 +1,305 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#ifndef _ICP_QAT_HW_H_ +#define _ICP_QAT_HW_H_ + +enum icp_qat_hw_ae_id { + ICP_QAT_HW_AE_0 = 0, + ICP_QAT_HW_AE_1 = 1, + ICP_QAT_HW_AE_2 = 2, + ICP_QAT_HW_AE_3 = 3, + ICP_QAT_HW_AE_4 = 4, + ICP_QAT_HW_AE_5 = 5, + ICP_QAT_HW_AE_6 = 6, + ICP_QAT_HW_AE_7 = 7, + ICP_QAT_HW_AE_8 = 8, + ICP_QAT_HW_AE_9 = 9, + ICP_QAT_HW_AE_10 = 10, + ICP_QAT_HW_AE_11 = 11, + ICP_QAT_HW_AE_DELIMITER = 12 +}; + +enum icp_qat_hw_qat_id { + ICP_QAT_HW_QAT_0 = 0, + ICP_QAT_HW_QAT_1 = 1, + ICP_QAT_HW_QAT_2 = 2, + ICP_QAT_HW_QAT_3 = 3, + ICP_QAT_HW_QAT_4 = 4, + ICP_QAT_HW_QAT_5 = 5, + ICP_QAT_HW_QAT_DELIMITER = 6 +}; + +enum icp_qat_hw_auth_algo { + ICP_QAT_HW_AUTH_ALGO_NULL = 0, + ICP_QAT_HW_AUTH_ALGO_SHA1 = 1, + ICP_QAT_HW_AUTH_ALGO_MD5 = 2, + ICP_QAT_HW_AUTH_ALGO_SHA224 = 3, + ICP_QAT_HW_AUTH_ALGO_SHA256 = 4, + ICP_QAT_HW_AUTH_ALGO_SHA384 = 5, + ICP_QAT_HW_AUTH_ALGO_SHA512 = 6, + ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7, + ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8, + ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9, + ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10, + ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11, + ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12, + ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13, + ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14, + ICP_QAT_HW_AUTH_RESERVED_1 = 15, + ICP_QAT_HW_AUTH_RESERVED_2 = 16, + ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17, + ICP_QAT_HW_AUTH_RESERVED_3 = 18, + ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19, + ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20 +}; + +enum icp_qat_hw_auth_mode { + ICP_QAT_HW_AUTH_MODE0 = 0, + ICP_QAT_HW_AUTH_MODE1 = 1, + ICP_QAT_HW_AUTH_MODE2 = 2, + ICP_QAT_HW_AUTH_MODE_DELIMITER = 3 +}; + +struct icp_qat_hw_auth_config { + uint32_t config; + uint32_t reserved; +}; + +#define QAT_AUTH_MODE_BITPOS 4 +#define QAT_AUTH_MODE_MASK 0xF +#define QAT_AUTH_ALGO_BITPOS 0 +#define QAT_AUTH_ALGO_MASK 0xF +#define QAT_AUTH_CMP_BITPOS 8 +#define QAT_AUTH_CMP_MASK 0x7F +#define QAT_AUTH_SHA3_PADDING_BITPOS 16 +#define QAT_AUTH_SHA3_PADDING_MASK 0x1 +#define QAT_AUTH_ALGO_SHA3_BITPOS 22 +#define QAT_AUTH_ALGO_SHA3_MASK 0x3 +#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \ + ((((mode) & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \ + (((algo) & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \ + (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \ + QAT_AUTH_ALGO_SHA3_BITPOS) | \ + (((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \ + (algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 
1 : 0) \
+ & QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \
+ (((cmp_len) & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
+
+struct icp_qat_hw_auth_counter {
+ __be32 counter;
+ uint32_t reserved;
+};
+
+#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF
+#define QAT_AUTH_COUNT_BITPOS 0
+#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \
+ (((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS)
+
+struct icp_qat_hw_auth_setup {
+ struct icp_qat_hw_auth_config auth_config;
+ struct icp_qat_hw_auth_counter auth_counter;
+};
+
+#define QAT_HW_DEFAULT_ALIGNMENT 8
+#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & (~((n) - 1)))
+#define ICP_QAT_HW_NULL_STATE1_SZ 32
+#define ICP_QAT_HW_MD5_STATE1_SZ 16
+#define ICP_QAT_HW_SHA1_STATE1_SZ 20
+#define ICP_QAT_HW_SHA224_STATE1_SZ 32
+#define ICP_QAT_HW_SHA256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA384_STATE1_SZ 64
+#define ICP_QAT_HW_SHA512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
+#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
+#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_F9_STATE1_SZ 32
+#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16
+#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
+#define ICP_QAT_HW_NULL_STATE2_SZ 32
+#define ICP_QAT_HW_MD5_STATE2_SZ 16
+#define ICP_QAT_HW_SHA1_STATE2_SZ 20
+#define ICP_QAT_HW_SHA224_STATE2_SZ 32
+#define ICP_QAT_HW_SHA256_STATE2_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0
+#define ICP_QAT_HW_SHA384_STATE2_SZ 64
+#define ICP_QAT_HW_SHA512_STATE2_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0
+#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
+#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
+#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16
+#define ICP_QAT_HW_F9_IK_SZ 16
+#define ICP_QAT_HW_F9_FK_SZ 16
+#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \
+ ICP_QAT_HW_F9_FK_SZ)
+#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32
+#define ICP_QAT_HW_GALOIS_H_SZ 16
+#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8
+#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16
+
+struct icp_qat_hw_auth_sha512 {
+ struct icp_qat_hw_auth_setup inner_setup;
+ uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ];
+ struct icp_qat_hw_auth_setup outer_setup;
+ uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ];
+};
+
+struct icp_qat_hw_auth_algo_blk {
+ struct icp_qat_hw_auth_sha512 sha;
+};
+
+#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0
+#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF
+
+enum icp_qat_hw_cipher_algo {
+ ICP_QAT_HW_CIPHER_ALGO_NULL = 0,
+ ICP_QAT_HW_CIPHER_ALGO_DES = 1,
+ ICP_QAT_HW_CIPHER_ALGO_3DES = 2,
+ ICP_QAT_HW_CIPHER_ALGO_AES128 = 3,
+ ICP_QAT_HW_CIPHER_ALGO_AES192 = 4,
+ ICP_QAT_HW_CIPHER_ALGO_AES256 = 5,
+ ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6,
+ ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7,
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8,
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9,
+ ICP_QAT_HW_CIPHER_DELIMITER = 10
+};
+
+enum icp_qat_hw_cipher_mode {
+ ICP_QAT_HW_CIPHER_ECB_MODE = 0,
+ ICP_QAT_HW_CIPHER_CBC_MODE = 1,
+ ICP_QAT_HW_CIPHER_CTR_MODE = 2,
+ ICP_QAT_HW_CIPHER_F8_MODE = 3,
+ ICP_QAT_HW_CIPHER_XTS_MODE = 6,
+ ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7
+};
+
+struct icp_qat_hw_cipher_config {
+ uint32_t val;
+ uint32_t reserved;
+};
+
+enum icp_qat_hw_cipher_dir {
+ ICP_QAT_HW_CIPHER_ENCRYPT = 0,
+ ICP_QAT_HW_CIPHER_DECRYPT = 1,
+};
+
+enum icp_qat_hw_cipher_convert {
+ ICP_QAT_HW_CIPHER_NO_CONVERT = 0,
+ ICP_QAT_HW_CIPHER_KEY_CONVERT = 1,
+};
+
+#define QAT_CIPHER_MODE_BITPOS 4
+#define QAT_CIPHER_MODE_MASK 0xF
+#define QAT_CIPHER_ALGO_BITPOS 0
+#define QAT_CIPHER_ALGO_MASK 0xF
+#define QAT_CIPHER_CONVERT_BITPOS 9
+#define QAT_CIPHER_CONVERT_MASK 0x1
+#define QAT_CIPHER_DIR_BITPOS 8
+#define QAT_CIPHER_DIR_MASK 0x1
+#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2
+#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2
+#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \
+ (((mode & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \
+ ((algo & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \
+ ((convert & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \
+ ((dir & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS))
+#define ICP_QAT_HW_DES_BLK_SZ 8
+#define ICP_QAT_HW_3DES_BLK_SZ 8
+#define ICP_QAT_HW_NULL_BLK_SZ 8
+#define ICP_QAT_HW_AES_BLK_SZ 16
+#define ICP_QAT_HW_KASUMI_BLK_SZ 8
+#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8
+#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8
+#define ICP_QAT_HW_NULL_KEY_SZ 256
+#define ICP_QAT_HW_DES_KEY_SZ 8
+#define ICP_QAT_HW_3DES_KEY_SZ 24
+#define ICP_QAT_HW_AES_128_KEY_SZ 16
+#define ICP_QAT_HW_AES_192_KEY_SZ 24
+#define ICP_QAT_HW_AES_256_KEY_SZ 32
+#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+ QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+ QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_KASUMI_KEY_SZ 16
+#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_ARC4_KEY_SZ 256
+#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16
+#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2
+#define INIT_SHRAM_CONSTANTS_TABLE_SZ 1024
+
+struct icp_qat_hw_cipher_aes256_f8 {
+ struct icp_qat_hw_cipher_config cipher_config;
+ uint8_t key[ICP_QAT_HW_AES_256_F8_KEY_SZ];
+};
+
+struct icp_qat_hw_cipher_algo_blk {
+ struct icp_qat_hw_cipher_aes256_f8 aes;
+};
+#endif
-- cgit v1.2.3


From d370cec3219490656d72f5ae6e5df32c113c5a44 Mon Sep 17 00:00:00 2001
From: Tadeusz Struk
Date: Thu, 5 Jun 2014 13:43:32 -0700
Subject: crypto: qat - Intel(R) QAT crypto interface

This patch adds qat crypto interface.

Acked-by: John Griffin
Reviewed-by: Bruce W.
Allan Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/qat_algs.c | 1034 ++++++++++++++++++++++++++++ drivers/crypto/qat/qat_common/qat_crypto.c | 284 ++++++++ drivers/crypto/qat/qat_common/qat_crypto.h | 83 +++ 3 files changed, 1401 insertions(+) create mode 100644 drivers/crypto/qat/qat_common/qat_algs.c create mode 100644 drivers/crypto/qat/qat_common/qat_crypto.c create mode 100644 drivers/crypto/qat/qat_common/qat_crypto.h (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c new file mode 100644 index 000000000000..193b753d6a0a --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -0,0 +1,1034 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "adf_accel_devices.h" +#include "adf_transport.h" +#include "adf_common_drv.h" +#include "qat_crypto.h" +#include "icp_qat_hw.h" +#include "icp_qat_fw.h" +#include "icp_qat_fw_la.h" + +#define QAT_AES_HW_CONFIG_ENC(alg) \ + ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \ + ICP_QAT_HW_CIPHER_NO_CONVERT, \ + ICP_QAT_HW_CIPHER_ENCRYPT) + +#define QAT_AES_HW_CONFIG_DEC(alg) \ + ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \ + ICP_QAT_HW_CIPHER_KEY_CONVERT, \ + ICP_QAT_HW_CIPHER_DECRYPT) + +static atomic_t active_dev; + +struct qat_alg_buf { + uint32_t len; + uint32_t resrvd; + uint64_t addr; +} __packed; + +struct qat_alg_buf_list { + uint64_t resrvd; + uint32_t num_bufs; + uint32_t num_mapped_bufs; + struct qat_alg_buf bufers[]; +} __packed __aligned(64); + +/* Common content descriptor */ +struct qat_alg_cd { + union { + struct qat_enc { /* Encrypt content desc */ + struct icp_qat_hw_cipher_algo_blk cipher; + struct icp_qat_hw_auth_algo_blk hash; + } qat_enc_cd; + struct qat_dec { /* Decrytp content desc */ + struct icp_qat_hw_auth_algo_blk hash; + struct icp_qat_hw_cipher_algo_blk cipher; + } qat_dec_cd; + }; +} __aligned(64); + +#define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk) + +struct qat_auth_state { + uint8_t data[MAX_AUTH_STATE_SIZE]; +} __aligned(64); + +struct qat_alg_session_ctx { + struct qat_alg_cd *enc_cd; + dma_addr_t enc_cd_paddr; + struct qat_alg_cd *dec_cd; + dma_addr_t dec_cd_paddr; + struct qat_auth_state *auth_hw_state_enc; + dma_addr_t auth_state_enc_paddr; + struct qat_auth_state *auth_hw_state_dec; + dma_addr_t auth_state_dec_paddr; + struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl; + struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl; + struct qat_crypto_instance *inst; + struct crypto_tfm *tfm; + struct crypto_shash *hash_tfm; + enum icp_qat_hw_auth_algo qat_hash_alg; + uint8_t salt[AES_BLOCK_SIZE]; + spinlock_t lock; /* protects qat_alg_session_ctx struct */ +}; + +static int get_current_node(void) +{ + return cpu_data(current_thread_info()->cpu).phys_proc_id; +} + +static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg) +{ + switch (qat_hash_alg) { + case ICP_QAT_HW_AUTH_ALGO_SHA1: + return ICP_QAT_HW_SHA1_STATE1_SZ; + case ICP_QAT_HW_AUTH_ALGO_SHA256: + return ICP_QAT_HW_SHA256_STATE1_SZ; + case ICP_QAT_HW_AUTH_ALGO_SHA512: + return ICP_QAT_HW_SHA512_STATE1_SZ; + default: + return -EFAULT; + }; + return -EFAULT; +} + +static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash, + struct qat_alg_session_ctx *ctx, + const uint8_t *auth_key, + unsigned int auth_keylen, uint8_t *auth_state) +{ + struct { + struct shash_desc shash; + char ctx[crypto_shash_descsize(ctx->hash_tfm)]; + } desc; + struct sha1_state sha1; + struct sha256_state sha256; + struct sha512_state sha512; + int block_size = crypto_shash_blocksize(ctx->hash_tfm); + int digest_size = crypto_shash_digestsize(ctx->hash_tfm); + uint8_t *ipad = auth_state; + uint8_t *opad = ipad + block_size; + __be32 *hash_state_out; + __be64 *hash512_state_out; + int i, offset; + + desc.shash.tfm = ctx->hash_tfm; + desc.shash.flags = 0x0; + + if (auth_keylen > block_size) { + char buff[SHA512_BLOCK_SIZE]; + int ret = crypto_shash_digest(&desc.shash, auth_key, + auth_keylen, buff); + if (ret) + return ret; + + memcpy(ipad, buff, digest_size); + memcpy(opad, buff, digest_size); + memset(ipad + digest_size, 0, block_size 
- digest_size); + memset(opad + digest_size, 0, block_size - digest_size); + } else { + memcpy(ipad, auth_key, auth_keylen); + memcpy(opad, auth_key, auth_keylen); + memset(ipad + auth_keylen, 0, block_size - auth_keylen); + memset(opad + auth_keylen, 0, block_size - auth_keylen); + } + + for (i = 0; i < block_size; i++) { + char *ipad_ptr = ipad + i; + char *opad_ptr = opad + i; + *ipad_ptr ^= 0x36; + *opad_ptr ^= 0x5C; + } + + if (crypto_shash_init(&desc.shash)) + return -EFAULT; + + if (crypto_shash_update(&desc.shash, ipad, block_size)) + return -EFAULT; + + hash_state_out = (__be32 *)hash->sha.state1; + hash512_state_out = (__be64 *)hash_state_out; + + switch (ctx->qat_hash_alg) { + case ICP_QAT_HW_AUTH_ALGO_SHA1: + if (crypto_shash_export(&desc.shash, &sha1)) + return -EFAULT; + for (i = 0; i < digest_size >> 2; i++, hash_state_out++) + *hash_state_out = cpu_to_be32(*(sha1.state + i)); + break; + case ICP_QAT_HW_AUTH_ALGO_SHA256: + if (crypto_shash_export(&desc.shash, &sha256)) + return -EFAULT; + for (i = 0; i < digest_size >> 2; i++, hash_state_out++) + *hash_state_out = cpu_to_be32(*(sha256.state + i)); + break; + case ICP_QAT_HW_AUTH_ALGO_SHA512: + if (crypto_shash_export(&desc.shash, &sha512)) + return -EFAULT; + for (i = 0; i < digest_size >> 3; i++, hash512_state_out++) + *hash512_state_out = cpu_to_be64(*(sha512.state + i)); + break; + default: + return -EFAULT; + } + + if (crypto_shash_init(&desc.shash)) + return -EFAULT; + + if (crypto_shash_update(&desc.shash, opad, block_size)) + return -EFAULT; + + offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8); + hash_state_out = (__be32 *)(hash->sha.state1 + offset); + hash512_state_out = (__be64 *)hash_state_out; + + switch (ctx->qat_hash_alg) { + case ICP_QAT_HW_AUTH_ALGO_SHA1: + if (crypto_shash_export(&desc.shash, &sha1)) + return -EFAULT; + for (i = 0; i < digest_size >> 2; i++, hash_state_out++) + *hash_state_out = cpu_to_be32(*(sha1.state + i)); + break; + case ICP_QAT_HW_AUTH_ALGO_SHA256: + if (crypto_shash_export(&desc.shash, &sha256)) + return -EFAULT; + for (i = 0; i < digest_size >> 2; i++, hash_state_out++) + *hash_state_out = cpu_to_be32(*(sha256.state + i)); + break; + case ICP_QAT_HW_AUTH_ALGO_SHA512: + if (crypto_shash_export(&desc.shash, &sha512)) + return -EFAULT; + for (i = 0; i < digest_size >> 3; i++, hash512_state_out++) + *hash512_state_out = cpu_to_be64(*(sha512.state + i)); + break; + default: + return -EFAULT; + } + return 0; +} + +static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header) +{ + header->hdr_flags = + ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET); + header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA; + header->comn_req_flags = + ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR, + QAT_COMN_PTR_TYPE_SGL); + ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_DIGEST_IN_BUFFER); + ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_PARTIAL_NONE); + ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags, + ICP_QAT_FW_CIPH_IV_16BYTE_DATA); + ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_NO_PROTO); + ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_NO_UPDATE_STATE); +} + +static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx, + int alg, struct crypto_authenc_keys *keys) +{ + struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm); + unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize; + struct qat_enc *enc_ctx = 
&ctx->enc_cd->qat_enc_cd; + struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher; + struct icp_qat_hw_auth_algo_blk *hash = + (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx + + sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen); + struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req_tmpl; + struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; + struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; + void *ptr = &req_tmpl->cd_ctrl; + struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr; + struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr; + struct icp_qat_fw_la_auth_req_params *auth_param = + (struct icp_qat_fw_la_auth_req_params *) + ((char *)&req_tmpl->serv_specif_rqpars + + sizeof(struct icp_qat_fw_la_cipher_req_params)); + + /* CD setup */ + cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg); + memcpy(cipher->aes.key, keys->enckey, keys->enckeylen); + hash->sha.inner_setup.auth_config.config = + ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1, + ctx->qat_hash_alg, digestsize); + hash->sha.inner_setup.auth_counter.counter = + cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm)); + + if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen, + (uint8_t *)ctx->auth_hw_state_enc)) + return -EFAULT; + + /* Request setup */ + qat_alg_init_common_hdr(header); + header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH; + ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_RET_AUTH_RES); + ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_NO_CMP_AUTH_RES); + cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr; + cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3; + + /* Cipher CD config setup */ + cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3; + cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3; + cipher_cd_ctrl->cipher_cfg_offset = 0; + ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER); + ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH); + /* Auth CD config setup */ + hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3; + hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED; + hash_cd_ctrl->inner_res_sz = digestsize; + hash_cd_ctrl->final_sz = digestsize; + + switch (ctx->qat_hash_alg) { + case ICP_QAT_HW_AUTH_ALGO_SHA1: + hash_cd_ctrl->inner_state1_sz = + round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8); + hash_cd_ctrl->inner_state2_sz = + round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8); + break; + case ICP_QAT_HW_AUTH_ALGO_SHA256: + hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ; + hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ; + break; + case ICP_QAT_HW_AUTH_ALGO_SHA512: + hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ; + hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ; + break; + default: + break; + } + hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset + + ((sizeof(struct icp_qat_hw_auth_setup) + + round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3); + auth_param->u1.auth_partial_st_prefix = ctx->auth_state_enc_paddr + + sizeof(struct icp_qat_hw_auth_counter) + + round_up(hash_cd_ctrl->inner_state1_sz, 8); + ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH); + ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR); + return 0; +} + +static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx, + int alg, struct crypto_authenc_keys *keys) +{ + struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm); + 
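/* build the decrypt-direction firmware request template: the content
+ * descriptor places the auth state first and the cipher config after
+ * it, and the slice chain runs hash -> cipher, the reverse of the
+ * encrypt-direction setup above */
+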
unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize; + struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd; + struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash; + struct icp_qat_hw_cipher_algo_blk *cipher = + (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx + + sizeof(struct icp_qat_hw_auth_setup) + + roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2); + struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req_tmpl; + struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; + struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; + void *ptr = &req_tmpl->cd_ctrl; + struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr; + struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr; + struct icp_qat_fw_la_auth_req_params *auth_param = + (struct icp_qat_fw_la_auth_req_params *) + ((char *)&req_tmpl->serv_specif_rqpars + + sizeof(struct icp_qat_fw_la_cipher_req_params)); + + /* CD setup */ + cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg); + memcpy(cipher->aes.key, keys->enckey, keys->enckeylen); + hash->sha.inner_setup.auth_config.config = + ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1, + ctx->qat_hash_alg, + digestsize); + hash->sha.inner_setup.auth_counter.counter = + cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm)); + + if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen, + (uint8_t *)ctx->auth_hw_state_dec)) + return -EFAULT; + + /* Request setup */ + qat_alg_init_common_hdr(header); + header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER; + ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_NO_RET_AUTH_RES); + ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_CMP_AUTH_RES); + cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr; + cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3; + + /* Cipher CD config setup */ + cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3; + cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3; + cipher_cd_ctrl->cipher_cfg_offset = + (sizeof(struct icp_qat_hw_auth_setup) + + roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3; + ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER); + ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR); + + /* Auth CD config setup */ + hash_cd_ctrl->hash_cfg_offset = 0; + hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED; + hash_cd_ctrl->inner_res_sz = digestsize; + hash_cd_ctrl->final_sz = digestsize; + + switch (ctx->qat_hash_alg) { + case ICP_QAT_HW_AUTH_ALGO_SHA1: + hash_cd_ctrl->inner_state1_sz = + round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8); + hash_cd_ctrl->inner_state2_sz = + round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8); + break; + case ICP_QAT_HW_AUTH_ALGO_SHA256: + hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ; + hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ; + break; + case ICP_QAT_HW_AUTH_ALGO_SHA512: + hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ; + hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ; + break; + default: + break; + } + + hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset + + ((sizeof(struct icp_qat_hw_auth_setup) + + round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3); + auth_param->u1.auth_partial_st_prefix = ctx->auth_state_enc_paddr + + sizeof(struct icp_qat_hw_auth_counter) + + round_up(hash_cd_ctrl->inner_state1_sz, 8); + auth_param->auth_res_sz = digestsize; + ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH); + 
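/* for decrypt, the auth slice runs first and verifies the digest;
+ * the cipher slice configured above then hands its output to
+ * ICP_QAT_FW_SLICE_DRAM_WR to write out the decrypted payload */
+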
ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER); + return 0; +} + +static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx, + const uint8_t *key, unsigned int keylen) +{ + struct crypto_authenc_keys keys; + int alg; + + if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE)) + return -EFAULT; + + if (crypto_authenc_extractkeys(&keys, key, keylen)) + goto bad_key; + + switch (keys.enckeylen) { + case AES_KEYSIZE_128: + alg = ICP_QAT_HW_CIPHER_ALGO_AES128; + break; + case AES_KEYSIZE_192: + alg = ICP_QAT_HW_CIPHER_ALGO_AES192; + break; + case AES_KEYSIZE_256: + alg = ICP_QAT_HW_CIPHER_ALGO_AES256; + break; + default: + goto bad_key; + break; + } + + if (qat_alg_init_enc_session(ctx, alg, &keys)) + goto error; + + if (qat_alg_init_dec_session(ctx, alg, &keys)) + goto error; + + return 0; +bad_key: + crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; +error: + return -EFAULT; +} + +static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key, + unsigned int keylen) +{ + struct qat_alg_session_ctx *ctx = crypto_aead_ctx(tfm); + struct device *dev; + + spin_lock(&ctx->lock); + if (ctx->enc_cd) { + /* rekeying */ + dev = &GET_DEV(ctx->inst->accel_dev); + memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd)); + memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd)); + memset(ctx->auth_hw_state_enc, 0, + sizeof(struct qat_auth_state)); + memset(ctx->auth_hw_state_dec, 0, + sizeof(struct qat_auth_state)); + memset(&ctx->enc_fw_req_tmpl, 0, + sizeof(struct icp_qat_fw_la_bulk_req)); + memset(&ctx->dec_fw_req_tmpl, 0, + sizeof(struct icp_qat_fw_la_bulk_req)); + } else { + /* new key */ + int node = get_current_node(); + struct qat_crypto_instance *inst = + qat_crypto_get_instance_node(node); + if (!inst) { + spin_unlock(&ctx->lock); + return -EINVAL; + } + + dev = &GET_DEV(inst->accel_dev); + ctx->inst = inst; + ctx->enc_cd = dma_zalloc_coherent(dev, + sizeof(struct qat_alg_cd), + &ctx->enc_cd_paddr, + GFP_ATOMIC); + if (!ctx->enc_cd) { + spin_unlock(&ctx->lock); + return -ENOMEM; + } + ctx->dec_cd = dma_zalloc_coherent(dev, + sizeof(struct qat_alg_cd), + &ctx->dec_cd_paddr, + GFP_ATOMIC); + if (!ctx->dec_cd) { + spin_unlock(&ctx->lock); + goto out_free_enc; + } + ctx->auth_hw_state_enc = + dma_zalloc_coherent(dev, sizeof(struct qat_auth_state), + &ctx->auth_state_enc_paddr, + GFP_ATOMIC); + if (!ctx->auth_hw_state_enc) { + spin_unlock(&ctx->lock); + goto out_free_dec; + } + ctx->auth_hw_state_dec = + dma_zalloc_coherent(dev, sizeof(struct qat_auth_state), + &ctx->auth_state_dec_paddr, + GFP_ATOMIC); + if (!ctx->auth_hw_state_dec) { + spin_unlock(&ctx->lock); + goto out_free_auth_enc; + } + } + spin_unlock(&ctx->lock); + if (qat_alg_init_sessions(ctx, key, keylen)) + goto out_free_all; + + return 0; + +out_free_all: + dma_free_coherent(dev, sizeof(struct qat_auth_state), + ctx->auth_hw_state_dec, ctx->auth_state_dec_paddr); + ctx->auth_hw_state_dec = NULL; +out_free_auth_enc: + dma_free_coherent(dev, sizeof(struct qat_auth_state), + ctx->auth_hw_state_enc, ctx->auth_state_enc_paddr); + ctx->auth_hw_state_enc = NULL; +out_free_dec: + dma_free_coherent(dev, sizeof(struct qat_alg_cd), + ctx->dec_cd, ctx->dec_cd_paddr); + ctx->dec_cd = NULL; +out_free_enc: + dma_free_coherent(dev, sizeof(struct qat_alg_cd), + ctx->enc_cd, ctx->enc_cd_paddr); + ctx->enc_cd = NULL; + return -ENOMEM; +} + +static void qat_alg_free_bufl(struct qat_crypto_instance *inst, + struct qat_crypto_request *qat_req) +{ + struct device *dev = 
&GET_DEV(inst->accel_dev); + struct qat_alg_buf_list *bl = qat_req->buf.bl; + struct qat_alg_buf_list *blout = qat_req->buf.blout; + dma_addr_t blp = qat_req->buf.blp; + dma_addr_t blpout = qat_req->buf.bloutp; + size_t sz = qat_req->buf.sz; + int i, bufs = bl->num_bufs; + + for (i = 0; i < bl->num_bufs; i++) + dma_unmap_single(dev, bl->bufers[i].addr, + bl->bufers[i].len, DMA_BIDIRECTIONAL); + + dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); + kfree(bl); + if (blp != blpout) { + /* If out of place operation dma unmap only data */ + int bufless = bufs - blout->num_mapped_bufs; + for (i = bufless; i < bufs; i++) { + dma_unmap_single(dev, blout->bufers[i].addr, + blout->bufers[i].len, + DMA_BIDIRECTIONAL); + } + dma_unmap_single(dev, blpout, sz, DMA_TO_DEVICE); + kfree(blout); + } +} + +static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, + struct scatterlist *assoc, + struct scatterlist *sgl, + struct scatterlist *sglout, uint8_t *iv, + uint8_t ivlen, + struct qat_crypto_request *qat_req) +{ + struct device *dev = &GET_DEV(inst->accel_dev); + int i, bufs = 0, n = sg_nents(sgl), assoc_n = sg_nents(assoc); + struct qat_alg_buf_list *bufl; + struct qat_alg_buf_list *buflout = NULL; + dma_addr_t blp; + dma_addr_t bloutp = 0; + struct scatterlist *sg; + size_t sz = sizeof(struct qat_alg_buf_list) + + ((1 + n + assoc_n) * sizeof(struct qat_alg_buf)); + + if (unlikely(!n)) + return -EINVAL; + + bufl = kmalloc_node(sz, GFP_ATOMIC, inst->accel_dev->numa_node); + if (unlikely(!bufl)) + return -ENOMEM; + + blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, blp))) + goto err; + + for_each_sg(assoc, sg, assoc_n, i) { + bufl->bufers[bufs].addr = dma_map_single(dev, + sg_virt(sg), + sg->length, + DMA_BIDIRECTIONAL); + bufl->bufers[bufs].len = sg->length; + if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr))) + goto err; + bufs++; + } + bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen, + DMA_BIDIRECTIONAL); + bufl->bufers[bufs].len = ivlen; + if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr))) + goto err; + bufs++; + + for_each_sg(sgl, sg, n, i) { + int y = i + bufs; + bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg), + sg->length, + DMA_BIDIRECTIONAL); + bufl->bufers[y].len = sg->length; + if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr))) + goto err; + } + bufl->num_bufs = n + bufs; + qat_req->buf.bl = bufl; + qat_req->buf.blp = blp; + qat_req->buf.sz = sz; + /* Handle out of place operation */ + if (sgl != sglout) { + struct qat_alg_buf *bufers; + + buflout = kmalloc_node(sz, GFP_ATOMIC, + inst->accel_dev->numa_node); + if (unlikely(!buflout)) + goto err; + bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, bloutp))) + goto err; + bufers = buflout->bufers; + /* For out of place operation dma map only data and + * reuse assoc mapping and iv */ + for (i = 0; i < bufs; i++) { + bufers[i].len = bufl->bufers[i].len; + bufers[i].addr = bufl->bufers[i].addr; + } + for_each_sg(sglout, sg, n, i) { + int y = i + bufs; + bufers[y].addr = dma_map_single(dev, sg_virt(sg), + sg->length, + DMA_BIDIRECTIONAL); + buflout->bufers[y].len = sg->length; + if (unlikely(dma_mapping_error(dev, bufers[y].addr))) + goto err; + } + buflout->num_bufs = n + bufs; + buflout->num_mapped_bufs = n; + qat_req->buf.blout = buflout; + qat_req->buf.bloutp = bloutp; + } else { + /* Otherwise set the src and dst to the same address */ + qat_req->buf.bloutp = qat_req->buf.blp; + } + return 0; +err: + 
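/* unwind only the mappings that actually succeeded;
+ * dma_mapping_error() identifies entries that were never mapped */
+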
dev_err(dev, "Failed to map buf for dma\n");
+ for_each_sg(sgl, sg, n + bufs, i) {
+ if (!dma_mapping_error(dev, bufl->bufers[i].addr)) {
+ dma_unmap_single(dev, bufl->bufers[i].addr,
+ bufl->bufers[i].len,
+ DMA_BIDIRECTIONAL);
+ }
+ }
+ if (!dma_mapping_error(dev, blp))
+ dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+ kfree(bufl);
+ if (sgl != sglout && buflout) {
+ for_each_sg(sglout, sg, n, i) {
+ int y = i + bufs;
+ if (!dma_mapping_error(dev, buflout->bufers[y].addr))
+ dma_unmap_single(dev, buflout->bufers[y].addr,
+ buflout->bufers[y].len,
+ DMA_BIDIRECTIONAL);
+ }
+ if (!dma_mapping_error(dev, bloutp))
+ dma_unmap_single(dev, bloutp, sz, DMA_TO_DEVICE);
+ kfree(buflout);
+ }
+ return -ENOMEM;
+}
+
+void qat_alg_callback(void *resp)
+{
+ struct icp_qat_fw_la_resp *qat_resp = resp;
+ struct qat_crypto_request *qat_req =
+ (void *)(dma_addr_t)qat_resp->opaque_data;
+ struct qat_alg_session_ctx *ctx = qat_req->ctx;
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct aead_request *areq = qat_req->areq;
+ uint8_t stat_field = qat_resp->comn_resp.comn_status;
+ int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
+
+ qat_alg_free_bufl(inst, qat_req);
+ if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
+ res = -EBADMSG;
+ areq->base.complete(&(areq->base), res);
+}
+
+static int qat_alg_dec(struct aead_request *areq)
+{
+ struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
+ struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_crypto_request *qat_req = aead_request_ctx(areq);
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ struct icp_qat_fw_la_bulk_req *msg;
+ int digest_size = crypto_aead_crt(aead_tfm)->authsize;
+ int ret, ctr = 0;
+
+ ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
+ areq->iv, AES_BLOCK_SIZE, qat_req);
+ if (unlikely(ret))
+ return ret;
+
+ msg = &qat_req->req;
+ *msg = ctx->dec_fw_req_tmpl;
+ qat_req->ctx = ctx;
+ qat_req->areq = areq;
+ qat_req->req.comn_mid.opaque_data = (uint64_t)(dma_addr_t)qat_req;
+ qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+ qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+ cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+ cipher_param->cipher_length = areq->cryptlen - digest_size;
+ cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
+ memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
+ auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+ auth_param->auth_off = 0;
+ auth_param->auth_len = areq->assoclen +
+ cipher_param->cipher_length + AES_BLOCK_SIZE;
+ do {
+ ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+ } while (ret == -EAGAIN && ctr++ < 10);
+
+ if (ret == -EAGAIN) {
+ qat_alg_free_bufl(ctx->inst, qat_req);
+ return -EBUSY;
+ }
+ return -EINPROGRESS;
+}
+
+static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv,
+ int enc_iv)
+{
+ struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
+ struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_crypto_request *qat_req = aead_request_ctx(areq);
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ struct icp_qat_fw_la_bulk_req *msg;
+ int ret, ctr = 0;
+
+ ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
+ iv, AES_BLOCK_SIZE, qat_req);
+ if
(unlikely(ret)) + return ret; + + msg = &qat_req->req; + *msg = ctx->enc_fw_req_tmpl; + qat_req->ctx = ctx; + qat_req->areq = areq; + qat_req->req.comn_mid.opaque_data = (uint64_t)(dma_addr_t)qat_req; + qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; + qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; + cipher_param = (void *)&qat_req->req.serv_specif_rqpars; + auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); + + if (enc_iv) { + cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE; + cipher_param->cipher_offset = areq->assoclen; + } else { + memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE); + cipher_param->cipher_length = areq->cryptlen; + cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE; + } + auth_param->auth_off = 0; + auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE; + + do { + ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); + } while (ret == -EAGAIN && ctr++ < 10); + + if (ret == -EAGAIN) { + qat_alg_free_bufl(ctx->inst, qat_req); + return -EBUSY; + } + return -EINPROGRESS; +} + +static int qat_alg_enc(struct aead_request *areq) +{ + return qat_alg_enc_internal(areq, areq->iv, 0); +} + +static int qat_alg_genivenc(struct aead_givcrypt_request *req) +{ + struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq); + struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm); + struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm); + __be64 seq; + + memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE); + seq = cpu_to_be64(req->seq); + memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t), + &seq, sizeof(uint64_t)); + return qat_alg_enc_internal(&req->areq, req->giv, 1); +} + +static int qat_alg_init(struct crypto_tfm *tfm, + enum icp_qat_hw_auth_algo hash, const char *hash_name) +{ + struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm); + + memset(ctx, '\0', sizeof(*ctx)); + ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0); + if (IS_ERR(ctx->hash_tfm)) + return -EFAULT; + spin_lock_init(&ctx->lock); + ctx->qat_hash_alg = hash; + tfm->crt_aead.reqsize = sizeof(struct aead_request) + + sizeof(struct qat_crypto_request); + ctx->tfm = tfm; + return 0; +} + +static int qat_alg_sha1_init(struct crypto_tfm *tfm) +{ + return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1"); +} + +static int qat_alg_sha256_init(struct crypto_tfm *tfm) +{ + return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256"); +} + +static int qat_alg_sha512_init(struct crypto_tfm *tfm) +{ + return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512"); +} + +static void qat_alg_exit(struct crypto_tfm *tfm) +{ + struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm); + struct qat_crypto_instance *inst = ctx->inst; + struct device *dev; + + if (!IS_ERR(ctx->hash_tfm)) + crypto_free_shash(ctx->hash_tfm); + + if (!inst) + return; + + dev = &GET_DEV(inst->accel_dev); + if (ctx->enc_cd) + dma_free_coherent(dev, sizeof(struct qat_alg_cd), + ctx->enc_cd, ctx->enc_cd_paddr); + if (ctx->dec_cd) + dma_free_coherent(dev, sizeof(struct qat_alg_cd), + ctx->dec_cd, ctx->dec_cd_paddr); + if (ctx->auth_hw_state_enc) + dma_free_coherent(dev, sizeof(struct qat_auth_state), + ctx->auth_hw_state_enc, + ctx->auth_state_enc_paddr); + + if (ctx->auth_hw_state_dec) + dma_free_coherent(dev, sizeof(struct qat_auth_state), + ctx->auth_hw_state_dec, + ctx->auth_state_dec_paddr); + + qat_crypto_put_instance(inst); +} + +static struct crypto_alg qat_algs[] = { { + .cra_name = "authenc(hmac(sha1),cbc(aes))", + .cra_driver_name = 
"qat_aes_cbc_hmac_sha1", + .cra_priority = 4001, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qat_alg_session_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_aead_type, + .cra_module = THIS_MODULE, + .cra_init = qat_alg_sha1_init, + .cra_exit = qat_alg_exit, + .cra_u = { + .aead = { + .setkey = qat_alg_setkey, + .decrypt = qat_alg_dec, + .encrypt = qat_alg_enc, + .givencrypt = qat_alg_genivenc, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + }, +}, { + .cra_name = "authenc(hmac(sha256),cbc(aes))", + .cra_driver_name = "qat_aes_cbc_hmac_sha256", + .cra_priority = 4001, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qat_alg_session_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_aead_type, + .cra_module = THIS_MODULE, + .cra_init = qat_alg_sha256_init, + .cra_exit = qat_alg_exit, + .cra_u = { + .aead = { + .setkey = qat_alg_setkey, + .decrypt = qat_alg_dec, + .encrypt = qat_alg_enc, + .givencrypt = qat_alg_genivenc, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + }, +}, { + .cra_name = "authenc(hmac(sha512),cbc(aes))", + .cra_driver_name = "qat_aes_cbc_hmac_sha512", + .cra_priority = 4001, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qat_alg_session_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_aead_type, + .cra_module = THIS_MODULE, + .cra_init = qat_alg_sha512_init, + .cra_exit = qat_alg_exit, + .cra_u = { + .aead = { + .setkey = qat_alg_setkey, + .decrypt = qat_alg_dec, + .encrypt = qat_alg_enc, + .givencrypt = qat_alg_genivenc, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + }, +} }; + +int qat_algs_register(void) +{ + if (atomic_add_return(1, &active_dev) == 1) { + int i; + + for (i = 0; i < ARRAY_SIZE(qat_algs); i++) + qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_AEAD | + CRYPTO_ALG_ASYNC; + return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs)); + } + return 0; +} + +int qat_algs_unregister(void) +{ + if (atomic_sub_return(1, &active_dev) == 0) + return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs)); + return 0; +} + +int qat_algs_init(void) +{ + atomic_set(&active_dev, 0); + crypto_get_default_rng(); + return 0; +} + +void qat_algs_exit(void) +{ + crypto_put_default_rng(); +} diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c new file mode 100644 index 000000000000..cc562cb67e37 --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_crypto.c @@ -0,0 +1,284 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. 
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include
+#include
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_transport.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "qat_crypto.h"
+#include "icp_qat_fw.h"
+
+#define SEC ADF_KERNEL_SEC
+
+static struct service_hndl qat_crypto;
+
+void qat_crypto_put_instance(struct qat_crypto_instance *inst)
+{
+ if (atomic_sub_return(1, &inst->refctr) == 0)
+ adf_dev_put(inst->accel_dev);
+}
+
+static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
+{
+ struct qat_crypto_instance *inst;
+ struct list_head *list_ptr, *tmp;
+ int i;
+
+ list_for_each_safe(list_ptr, tmp, &accel_dev->crypto_list) {
+ inst = list_entry(list_ptr, struct qat_crypto_instance, list);
+
+ for (i = 0; i < atomic_read(&inst->refctr); i++)
+ qat_crypto_put_instance(inst);
+
+ if (inst->sym_tx)
+ adf_remove_ring(inst->sym_tx);
+
+ if (inst->sym_rx)
+ adf_remove_ring(inst->sym_rx);
+
+ if (inst->pke_tx)
+ adf_remove_ring(inst->pke_tx);
+
+ if (inst->pke_rx)
+ adf_remove_ring(inst->pke_rx);
+
+ if (inst->rnd_tx)
+ adf_remove_ring(inst->rnd_tx);
+
+ if (inst->rnd_rx)
+ adf_remove_ring(inst->rnd_rx);
+
+ list_del(list_ptr);
+ kfree(inst);
+ }
+ return 0;
+}
+
+struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
+{
+ struct adf_accel_dev *accel_dev = NULL;
+ struct qat_crypto_instance *inst_best = NULL;
+ struct list_head *itr;
+ unsigned long best = ~0;
+
+ list_for_each(itr, adf_devmgr_get_head()) {
+ accel_dev = list_entry(itr, struct adf_accel_dev, list);
+ if (accel_dev->numa_node == node && adf_dev_started(accel_dev))
+ break;
+ accel_dev = NULL;
+ }
+ if (!accel_dev) {
+ pr_err("QAT: Could not find device on given node\n");
+ accel_dev = adf_devmgr_get_first();
+ }
+ if (!accel_dev || !adf_dev_started(accel_dev))
+ return NULL;
+
+ list_for_each(itr, &accel_dev->crypto_list) {
+ struct qat_crypto_instance *inst;
+ unsigned long cur;
+
+ inst = list_entry(itr, struct qat_crypto_instance, list);
+ cur = atomic_read(&inst->refctr);
+ if (best > cur) {
+ inst_best = inst;
+ best = cur;
+ }
+ }
+ if (inst_best) {
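+ /* the first reference to an instance also pins the parent
+ * accel_dev so it cannot be torn down while the instance is used */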
+ if (atomic_add_return(1, &inst_best->refctr) == 1) {
+ if (adf_dev_get(accel_dev)) {
+ atomic_dec(&inst_best->refctr);
+ pr_err("QAT: Could not increment dev refctr\n");
+ return NULL;
+ }
+ }
+ }
+ return inst_best;
+}
+
+static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
+{
+ int i;
+ unsigned long bank;
+ unsigned long num_inst, num_msg_sym, num_msg_asym;
+ int msg_size;
+ struct qat_crypto_instance *inst;
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+
+ INIT_LIST_HEAD(&accel_dev->crypto_list);
+ strlcpy(key, ADF_NUM_CY, sizeof(key));
+
+ if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+ return -EFAULT;
+
+ if (kstrtoul(val, 0, &num_inst))
+ return -EFAULT;
+
+ for (i = 0; i < num_inst; i++) {
+ inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
+ accel_dev->numa_node);
+ if (!inst)
+ goto err;
+
+ list_add_tail(&inst->list, &accel_dev->crypto_list);
+ inst->id = i;
+ atomic_set(&inst->refctr, 0);
+ inst->accel_dev = accel_dev;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
+ if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+ goto err;
+
+ if (kstrtoul(val, 10, &bank))
+ goto err;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+ if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+ goto err;
+
+ if (kstrtoul(val, 10, &num_msg_sym))
+ goto err;
+ num_msg_sym = num_msg_sym >> 1;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+ if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+ goto err;
+
+ if (kstrtoul(val, 10, &num_msg_asym))
+ goto err;
+ num_msg_asym = num_msg_asym >> 1;
+
+ msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+ if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
+ msg_size, key, NULL, 0, &inst->sym_tx))
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i);
+ if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
+ msg_size, key, NULL, 0, &inst->rnd_tx))
+ goto err;
+
+ msg_size = msg_size >> 1;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+ if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
+ msg_size, key, NULL, 0, &inst->pke_tx))
+ goto err;
+
+ msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+ if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
+ msg_size, key, qat_alg_callback, 0,
+ &inst->sym_rx))
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i);
+ if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
+ msg_size, key, qat_alg_callback, 0,
+ &inst->rnd_rx))
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+ if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
+ msg_size, key, qat_alg_callback, 0,
+ &inst->pke_rx))
+ goto err;
+ }
+ return 0;
+err:
+ qat_crypto_free_instances(accel_dev);
+ return -ENOMEM;
+}
+
+static int qat_crypto_init(struct adf_accel_dev *accel_dev)
+{
+ if (qat_crypto_create_instances(accel_dev))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
+{
+ return qat_crypto_free_instances(accel_dev);
+}
+
+static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
+ enum adf_event event)
+{
+ int ret;
+
+ switch (event) {
+ case ADF_EVENT_INIT:
+ ret = qat_crypto_init(accel_dev);
+ break;
+ case ADF_EVENT_SHUTDOWN:
+ ret = qat_crypto_shutdown(accel_dev);
+ break;
+ case ADF_EVENT_RESTARTING:
+ case ADF_EVENT_RESTARTED:
+ case ADF_EVENT_START:
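+ /* the remaining lifecycle events need no crypto-specific action */
+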
case ADF_EVENT_STOP: + default: + ret = 0; + } + return ret; +} + +int qat_crypto_register(void) +{ + memset(&qat_crypto, 0, sizeof(qat_crypto)); + qat_crypto.event_hld = qat_crypto_event_handler; + qat_crypto.name = "qat_crypto"; + return adf_service_register(&qat_crypto); +} + +int qat_crypto_unregister(void) +{ + return adf_service_unregister(&qat_crypto); +} diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h new file mode 100644 index 000000000000..ab8468d11ddb --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_crypto.h @@ -0,0 +1,83 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#ifndef _QAT_CRYPTO_INSTANCE_H_ +#define _QAT_CRYPTO_INSTANCE_H_ + +#include +#include +#include "adf_accel_devices.h" +#include "icp_qat_fw_la.h" + +struct qat_crypto_instance { + struct adf_etr_ring_data *sym_tx; + struct adf_etr_ring_data *sym_rx; + struct adf_etr_ring_data *pke_tx; + struct adf_etr_ring_data *pke_rx; + struct adf_etr_ring_data *rnd_tx; + struct adf_etr_ring_data *rnd_rx; + struct adf_accel_dev *accel_dev; + struct list_head list; + unsigned long state; + int id; + atomic_t refctr; +}; + +struct qat_crypto_request_buffs { + struct qat_alg_buf_list *bl; + dma_addr_t blp; + struct qat_alg_buf_list *blout; + dma_addr_t bloutp; + size_t sz; +}; + +struct qat_crypto_request { + struct icp_qat_fw_la_bulk_req req; + struct qat_alg_session_ctx *ctx; + struct aead_request *areq; + struct qat_crypto_request_buffs buf; +}; +#endif -- cgit v1.2.3 From b4b7e67c917fa8e2171b2ee7717b44efd7a52c47 Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Thu, 5 Jun 2014 13:43:47 -0700 Subject: crypto: qat - Intel(R) QAT ucode part of fw loader This patch adds microcode part of the firmware loader. v4 - splits FW loader part into two smaller patches. Acked-by: Bo Cui Reviewed-by: Bruce W. Allan Signed-off-by: Karen Xiang Signed-off-by: Pingchaox Yang Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/icp_qat_uclo.h | 377 ++++++++ drivers/crypto/qat/qat_common/qat_uclo.c | 1192 ++++++++++++++++++++++++++ 2 files changed, 1569 insertions(+) create mode 100644 drivers/crypto/qat/qat_common/icp_qat_uclo.h create mode 100644 drivers/crypto/qat/qat_common/qat_uclo.c (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h new file mode 100644 index 000000000000..120fbce517a9 --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_uclo.h @@ -0,0 +1,377 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef __ICP_QAT_UCLO_H__ +#define __ICP_QAT_UCLO_H__ + +#define ICP_QAT_AC_C_CPU_TYPE 0x00400000 +#define ICP_QAT_UCLO_MAX_AE 12 +#define ICP_QAT_UCLO_MAX_CTX 8 +#define ICP_QAT_UCLO_MAX_UIMAGE (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX) +#define ICP_QAT_UCLO_MAX_USTORE 0x4000 +#define ICP_QAT_UCLO_MAX_XFER_REG 128 +#define ICP_QAT_UCLO_MAX_GPR_REG 128 +#define ICP_QAT_UCLO_MAX_NN_REG 128 +#define ICP_QAT_UCLO_MAX_LMEM_REG 1024 +#define ICP_QAT_UCLO_AE_ALL_CTX 0xff +#define ICP_QAT_UOF_OBJID_LEN 8 +#define ICP_QAT_UOF_FID 0xc6c2 +#define ICP_QAT_UOF_MAJVER 0x4 +#define ICP_QAT_UOF_MINVER 0x11 +#define ICP_QAT_UOF_NN_MODE_NOTCARE 0xff +#define ICP_QAT_UOF_OBJS "UOF_OBJS" +#define ICP_QAT_UOF_STRT "UOF_STRT" +#define ICP_QAT_UOF_GTID "UOF_GTID" +#define ICP_QAT_UOF_IMAG "UOF_IMAG" +#define ICP_QAT_UOF_IMEM "UOF_IMEM" +#define ICP_QAT_UOF_MSEG "UOF_MSEG" +#define ICP_QAT_UOF_LOCAL_SCOPE 1 +#define ICP_QAT_UOF_INIT_EXPR 0 +#define ICP_QAT_UOF_INIT_REG 1 +#define ICP_QAT_UOF_INIT_REG_CTX 2 +#define ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP 3 + +#define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf) +#define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf) +#define ICP_QAT_SHARED_USTORE_MODE(ae_mode) (((ae_mode) >> 0xb) & 0x1) +#define RELOADABLE_CTX_SHARED_MODE(ae_mode) (((ae_mode) >> 0xc) & 0x1) + +#define ICP_QAT_LOC_MEM0_MODE(ae_mode) (((ae_mode) >> 0x8) & 0x1) +#define ICP_QAT_LOC_MEM1_MODE(ae_mode) (((ae_mode) >> 0x9) & 0x1) + +enum icp_qat_uof_mem_region { + ICP_QAT_UOF_SRAM_REGION = 0x0, + ICP_QAT_UOF_LMEM_REGION = 0x3, + ICP_QAT_UOF_UMEM_REGION = 0x5 +}; + +enum icp_qat_uof_regtype { + ICP_NO_DEST, + ICP_GPA_REL, + ICP_GPA_ABS, + ICP_GPB_REL, + ICP_GPB_ABS, + ICP_SR_REL, + ICP_SR_RD_REL, + ICP_SR_WR_REL, + ICP_SR_ABS, + ICP_SR_RD_ABS, + ICP_SR_WR_ABS, + ICP_DR_REL, + ICP_DR_RD_REL, + ICP_DR_WR_REL, + ICP_DR_ABS, + ICP_DR_RD_ABS, + ICP_DR_WR_ABS, + ICP_LMEM, + ICP_LMEM0, + ICP_LMEM1, + ICP_NEIGH_REL, +}; + +struct icp_qat_uclo_page { + struct icp_qat_uclo_encap_page *encap_page; + struct icp_qat_uclo_region *region; + unsigned int flags; +}; + +struct icp_qat_uclo_region { + struct icp_qat_uclo_page *loaded; + struct icp_qat_uclo_page *page; +}; + +struct icp_qat_uclo_aeslice { + struct icp_qat_uclo_region *regions; + struct icp_qat_uclo_page *page; + struct icp_qat_uclo_page *cur_page[ICP_QAT_UCLO_MAX_CTX]; + struct icp_qat_uclo_encapme *encap_image; + unsigned int ctx_mask_assigned; + unsigned int new_uaddr[ICP_QAT_UCLO_MAX_CTX]; +}; + +struct icp_qat_uclo_aedata { + unsigned int slice_num; + unsigned int eff_ustore_size; + struct icp_qat_uclo_aeslice ae_slices[ICP_QAT_UCLO_MAX_CTX]; +}; + +struct icp_qat_uof_encap_obj { + char *beg_uof; + struct icp_qat_uof_objhdr *obj_hdr; + struct icp_qat_uof_chunkhdr *chunk_hdr; + struct icp_qat_uof_varmem_seg *var_mem_seg; +}; + +struct icp_qat_uclo_encap_uwblock { + unsigned int start_addr; + unsigned int words_num; + uint64_t micro_words; +}; + +struct icp_qat_uclo_encap_page { + unsigned int def_page; + unsigned int 
page_region; + unsigned int beg_addr_v; + unsigned int beg_addr_p; + unsigned int micro_words_num; + unsigned int uwblock_num; + struct icp_qat_uclo_encap_uwblock *uwblock; +}; + +struct icp_qat_uclo_encapme { + struct icp_qat_uof_image *img_ptr; + struct icp_qat_uclo_encap_page *page; + unsigned int ae_reg_num; + struct icp_qat_uof_ae_reg *ae_reg; + unsigned int init_regsym_num; + struct icp_qat_uof_init_regsym *init_regsym; + unsigned int sbreak_num; + struct icp_qat_uof_sbreak *sbreak; + unsigned int uwords_num; +}; + +struct icp_qat_uclo_init_mem_table { + unsigned int entry_num; + struct icp_qat_uof_initmem *init_mem; +}; + +struct icp_qat_uclo_objhdr { + char *file_buff; + unsigned int checksum; + unsigned int size; +}; + +struct icp_qat_uof_strtable { + unsigned int table_len; + unsigned int reserved; + uint64_t strings; +}; + +struct icp_qat_uclo_objhandle { + unsigned int prod_type; + unsigned int prod_rev; + struct icp_qat_uclo_objhdr *obj_hdr; + struct icp_qat_uof_encap_obj encap_uof_obj; + struct icp_qat_uof_strtable str_table; + struct icp_qat_uclo_encapme ae_uimage[ICP_QAT_UCLO_MAX_UIMAGE]; + struct icp_qat_uclo_aedata ae_data[ICP_QAT_UCLO_MAX_AE]; + struct icp_qat_uclo_init_mem_table init_mem_tab; + struct icp_qat_uof_batch_init *lm_init_tab[ICP_QAT_UCLO_MAX_AE]; + struct icp_qat_uof_batch_init *umem_init_tab[ICP_QAT_UCLO_MAX_AE]; + int uimage_num; + int uword_in_bytes; + int global_inited; + unsigned int ae_num; + unsigned int ustore_phy_size; + void *obj_buf; + uint64_t *uword_buf; +}; + +struct icp_qat_uof_uword_block { + unsigned int start_addr; + unsigned int words_num; + unsigned int uword_offset; + unsigned int reserved; +}; + +struct icp_qat_uof_filehdr { + unsigned short file_id; + unsigned short reserved1; + char min_ver; + char maj_ver; + unsigned short reserved2; + unsigned short max_chunks; + unsigned short num_chunks; +}; + +struct icp_qat_uof_filechunkhdr { + char chunk_id[ICP_QAT_UOF_OBJID_LEN]; + unsigned int checksum; + unsigned int offset; + unsigned int size; +}; + +struct icp_qat_uof_objhdr { + unsigned int cpu_type; + unsigned short min_cpu_ver; + unsigned short max_cpu_ver; + short max_chunks; + short num_chunks; + unsigned int reserved1; + unsigned int reserved2; +}; + +struct icp_qat_uof_chunkhdr { + char chunk_id[ICP_QAT_UOF_OBJID_LEN]; + unsigned int offset; + unsigned int size; +}; + +struct icp_qat_uof_memvar_attr { + unsigned int offset_in_byte; + unsigned int value; +}; + +struct icp_qat_uof_initmem { + unsigned int sym_name; + char region; + char scope; + unsigned short reserved1; + unsigned int addr; + unsigned int num_in_bytes; + unsigned int val_attr_num; +}; + +struct icp_qat_uof_init_regsym { + unsigned int sym_name; + char init_type; + char value_type; + char reg_type; + unsigned char ctx; + unsigned int reg_addr; + unsigned int value; +}; + +struct icp_qat_uof_varmem_seg { + unsigned int sram_base; + unsigned int sram_size; + unsigned int sram_alignment; + unsigned int sdram_base; + unsigned int sdram_size; + unsigned int sdram_alignment; + unsigned int sdram1_base; + unsigned int sdram1_size; + unsigned int sdram1_alignment; + unsigned int scratch_base; + unsigned int scratch_size; + unsigned int scratch_alignment; +}; + +struct icp_qat_uof_gtid { + char tool_id[ICP_QAT_UOF_OBJID_LEN]; + int tool_ver; + unsigned int reserved1; + unsigned int reserved2; +}; + +struct icp_qat_uof_sbreak { + unsigned int page_num; + unsigned int virt_uaddr; + unsigned char sbreak_type; + unsigned char reg_type; + unsigned short reserved1; + unsigned int 
addr_offset; + unsigned int reg_addr; +}; + +struct icp_qat_uof_code_page { + unsigned int page_region; + unsigned int page_num; + unsigned char def_page; + unsigned char reserved2; + unsigned short reserved1; + unsigned int beg_addr_v; + unsigned int beg_addr_p; + unsigned int neigh_reg_tab_offset; + unsigned int uc_var_tab_offset; + unsigned int imp_var_tab_offset; + unsigned int imp_expr_tab_offset; + unsigned int code_area_offset; +}; + +struct icp_qat_uof_image { + unsigned int img_name; + unsigned int ae_assigned; + unsigned int ctx_assigned; + unsigned int cpu_type; + unsigned int entry_address; + unsigned int fill_pattern[2]; + unsigned int reloadable_size; + unsigned char sensitivity; + unsigned char reserved; + unsigned short ae_mode; + unsigned short max_ver; + unsigned short min_ver; + unsigned short image_attrib; + unsigned short reserved2; + unsigned short page_region_num; + unsigned short numpages; + unsigned int reg_tab_offset; + unsigned int init_reg_sym_tab; + unsigned int sbreak_tab; + unsigned int app_metadata; +}; + +struct icp_qat_uof_objtable { + unsigned int entry_num; +}; + +struct icp_qat_uof_ae_reg { + unsigned int name; + unsigned int vis_name; + unsigned short type; + unsigned short addr; + unsigned short access_mode; + unsigned char visible; + unsigned char reserved1; + unsigned short ref_count; + unsigned short reserved2; + unsigned int xo_id; +}; + +struct icp_qat_uof_code_area { + unsigned int micro_words_num; + unsigned int uword_block_tab; +}; + +struct icp_qat_uof_batch_init { + unsigned int ae; + unsigned int addr; + unsigned int *value; + unsigned int size; + struct icp_qat_uof_batch_init *next; +}; +#endif diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c new file mode 100644 index 000000000000..a698d07ef97b --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -0,0 +1,1192 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include +#include +#include + +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "icp_qat_uclo.h" +#include "icp_qat_hal.h" +#include "icp_qat_fw_loader_handle.h" + +#define UWORD_CPYBUF_SIZE 1024 +#define INVLD_UWORD 0xffffffffffull +#define PID_MINOR_REV 0xf +#define PID_MAJOR_REV (0xf << 4) + +static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle, + unsigned int ae, unsigned int image_num) +{ + struct icp_qat_uclo_aedata *ae_data; + struct icp_qat_uclo_encapme *encap_image; + struct icp_qat_uclo_page *page = NULL; + struct icp_qat_uclo_aeslice *ae_slice = NULL; + + ae_data = &obj_handle->ae_data[ae]; + encap_image = &obj_handle->ae_uimage[image_num]; + ae_slice = &ae_data->ae_slices[ae_data->slice_num]; + ae_slice->encap_image = encap_image; + + if (encap_image->img_ptr) { + ae_slice->ctx_mask_assigned = + encap_image->img_ptr->ctx_assigned; + ae_data->eff_ustore_size = obj_handle->ustore_phy_size; + } else { + ae_slice->ctx_mask_assigned = 0; + } + ae_slice->regions = kzalloc(sizeof(*(ae_slice->regions)), GFP_KERNEL); + if (!(ae_slice->regions)) + return -ENOMEM; + ae_slice->page = kzalloc(sizeof(*(ae_slice->page)), GFP_KERNEL); + if (!(ae_slice->page)) + goto out_err; + page = ae_slice->page; + page->encap_page = encap_image->page; + ae_slice->page->region = ae_slice->regions; + ae_data->slice_num++; + return 0; +out_err: + kfree(ae_slice->regions); + ae_slice->regions = NULL; + return -ENOMEM; +} + +static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data) +{ + unsigned int ss = 0; + + if (!ae_data) { + pr_err("QAT: bad argument, ae_data is NULL\n "); + return -EINVAL; + } + + for (ss = 0; ss < ae_data->slice_num; ss++) { + kfree(ae_data->ae_slices[ss].regions); + ae_data->ae_slices[ss].regions = NULL; + kfree(ae_data->ae_slices[ss].page); + ae_data->ae_slices[ss].page = NULL; + } + return 0; +} + +static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table, + unsigned int str_offset) +{ + if ((!str_table->table_len) || (str_offset > str_table->table_len)) + return NULL; + return (char *)(((unsigned long)(str_table->strings)) + str_offset); +} + +static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr) +{ + int maj = hdr->maj_ver & 0xff; + int min = hdr->min_ver & 0xff; + + if (hdr->file_id != ICP_QAT_UOF_FID) { + pr_err("QAT: Invalid header 0x%x\n", hdr->file_id); + return -EINVAL; + } + if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) { + pr_err("QAT: bad uof version, major 0x%x, minor 0x%x\n", + maj, min); + return -EINVAL; + } + return 0; +} + +static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle, + unsigned int addr, unsigned int *val, + unsigned int num_in_bytes) +{ + unsigned int outval; + unsigned 
char *ptr = (unsigned char *)val; + + while (num_in_bytes) { + memcpy(&outval, ptr, 4); + SRAM_WRITE(handle, addr, outval); + num_in_bytes -= 4; + ptr += 4; + addr += 4; + } +} + +static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int addr, + unsigned int *val, + unsigned int num_in_bytes) +{ + unsigned int outval; + unsigned char *ptr = (unsigned char *)val; + + addr >>= 0x2; /* convert to uword address */ + + while (num_in_bytes) { + memcpy(&outval, ptr, 4); + qat_hal_wr_umem(handle, ae, addr++, 1, &outval); + num_in_bytes -= 4; + ptr += 4; + } + return; +} + +static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, + struct icp_qat_uof_batch_init + *umem_init_header) +{ + struct icp_qat_uof_batch_init *umem_init; + + if (!umem_init_header) + return; + umem_init = umem_init_header->next; + while (umem_init) { + unsigned int addr, *value, size; + ae = umem_init->ae; + addr = umem_init->addr; + value = umem_init->value; + size = umem_init->size; + qat_uclo_wr_umem_by_words(handle, ae, addr, value, size); + umem_init = umem_init->next; + } +} + +static void +qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_uof_batch_init **base) +{ + struct icp_qat_uof_batch_init *umem_init; + + umem_init = *base; + while (umem_init) { + struct icp_qat_uof_batch_init *pre; + pre = umem_init; + umem_init = umem_init->next; + kfree(pre); + } + *base = NULL; +} + +static int qat_uclo_parse_num(char *str, unsigned int *num) +{ + char buf[16]; + unsigned long ae = 0; + int i; + + memset(buf, '\0', 16); + strncpy(buf, str, 15); + for (i = 0; i < 16; i++) { + if (!isdigit(buf[i])) { + buf[i] = '\0'; + break; + } + } + if ((kstrtoul(buf, 10, &ae))) + return -EFAULT; + + *num = (unsigned int)ae; + return 0; +} + +static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_uof_initmem *init_mem, + unsigned int size_range, unsigned int *ae) +{ + struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; + char *str; + + if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) { + pr_err("QAT: initmem is out of range"); + return -EINVAL; + } + if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) { + pr_err("QAT: Memory scope for init_mem error\n"); + return -EINVAL; + } + str = qat_uclo_get_string(&(obj_handle->str_table), init_mem->sym_name); + if (!str) { + pr_err("QAT: AE name assigned in uof init table is NULL\n"); + return -EINVAL; + } + if (qat_uclo_parse_num(str, ae)) { + pr_err("QAT: Parse num for AE number failed\n"); + return -EINVAL; + } + if (!test_bit(*ae, (unsigned long *)&(handle->hal_handle->ae_mask))) { + pr_err("QAT: ae %d to be init is fused off\n", *ae); + return -EINVAL; + } + if (*ae >= ICP_QAT_UCLO_MAX_AE) { + pr_err("QAT: ae %d out of range\n", *ae); + return -EINVAL; + } + return 0; +} + +static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle + *handle, struct icp_qat_uof_initmem + *init_mem, unsigned int ae, + struct icp_qat_uof_batch_init + **init_tab_base) +{ + struct icp_qat_uof_batch_init *init_header, *tail; + struct icp_qat_uof_batch_init *mem_init, *tail_old; + struct icp_qat_uof_memvar_attr *mem_val_attr; + unsigned int i, flag = 0; + + mem_val_attr = + (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem + + sizeof(struct icp_qat_uof_initmem)); + + init_header = *init_tab_base; + if (!init_header) { + init_header = kzalloc(sizeof(*init_header), GFP_KERNEL); + if (!init_header) 
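+			/* allocation of the dummy list head itself failed;
+			 * nothing has been linked yet, so there is nothing
+			 * to unwind before returning */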
+ return -ENOMEM; + init_header->size = 1; + *init_tab_base = init_header; + flag = 1; + } + tail_old = init_header; + while (tail_old->next) + tail_old = tail_old->next; + tail = tail_old; + for (i = 0; i < init_mem->val_attr_num; i++) { + mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL); + if (!mem_init) + goto out_err; + mem_init->ae = ae; + mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte; + mem_init->value = &mem_val_attr->value; + mem_init->size = 4; + mem_init->next = NULL; + tail->next = mem_init; + tail = mem_init; + init_header->size += qat_hal_get_ins_num(); + mem_val_attr++; + } + return 0; +out_err: + while (tail_old) { + mem_init = tail_old->next; + kfree(tail_old); + tail_old = mem_init; + } + if (flag) + kfree(*init_tab_base); + return -ENOMEM; +} + +static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_uof_initmem *init_mem) +{ + struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; + unsigned int ae; + + if (qat_uclo_fetch_initmem_ae(handle, init_mem, + ICP_QAT_UCLO_MAX_LMEM_REG, &ae)) + return -EINVAL; + if (qat_uclo_create_batch_init_list(handle, init_mem, ae, + &(obj_handle->lm_init_tab[ae]))) + return -EINVAL; + return 0; +} + +static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_uof_initmem *init_mem) +{ + struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; + unsigned int ae, ustore_size, uaddr, i; + + ustore_size = obj_handle->ustore_phy_size; + if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae)) + return -EINVAL; + if (qat_uclo_create_batch_init_list(handle, init_mem, ae, + &(obj_handle->umem_init_tab[ae]))) + return -EINVAL; + /* set the highest ustore address referenced */ + uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2; + for (i = 0; i < obj_handle->ae_data[ae].slice_num; i++) { + if (obj_handle->ae_data[ae].ae_slices[i]. + encap_image->uwords_num < uaddr) + obj_handle->ae_data[ae].ae_slices[i]. + encap_image->uwords_num = uaddr; + } + return 0; +} + +#define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000 +static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_uof_initmem *init_mem) +{ + unsigned int i; + struct icp_qat_uof_memvar_attr *mem_val_attr; + + mem_val_attr = + (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem + + sizeof(struct icp_qat_uof_initmem)); + + switch (init_mem->region) { + case ICP_QAT_UOF_SRAM_REGION: + if ((init_mem->addr + init_mem->num_in_bytes) > + ICP_DH895XCC_PESRAM_BAR_SIZE) { + pr_err("QAT: initmem on SRAM is out of range"); + return -EINVAL; + } + for (i = 0; i < init_mem->val_attr_num; i++) { + qat_uclo_wr_sram_by_words(handle, + init_mem->addr + + mem_val_attr->offset_in_byte, + &mem_val_attr->value, 4); + mem_val_attr++; + } + break; + case ICP_QAT_UOF_LMEM_REGION: + if (qat_uclo_init_lmem_seg(handle, init_mem)) + return -EINVAL; + break; + case ICP_QAT_UOF_UMEM_REGION: + if (qat_uclo_init_umem_seg(handle, init_mem)) + return -EINVAL; + break; + default: + pr_err("QAT: initmem region error. 
region type=0x%x\n", + init_mem->region); + return -EINVAL; + } + return 0; +} + +static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_uclo_encapme *image) +{ + unsigned int i; + struct icp_qat_uclo_encap_page *page; + struct icp_qat_uof_image *uof_image; + unsigned char ae; + unsigned int ustore_size; + unsigned int patt_pos; + struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; + uint64_t *fill_data; + + uof_image = image->img_ptr; + fill_data = kzalloc(ICP_QAT_UCLO_MAX_USTORE * sizeof(uint64_t), + GFP_KERNEL); + if (!fill_data) + return -EFAULT; + for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++) + memcpy(&fill_data[i], &uof_image->fill_pattern, + sizeof(uint64_t)); + page = image->page; + + for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) { + if (!test_bit(ae, (unsigned long *)&(uof_image->ae_assigned))) + continue; + ustore_size = obj_handle->ae_data[ae].eff_ustore_size; + patt_pos = page->beg_addr_p + page->micro_words_num; + + qat_hal_wr_uwords(handle, (unsigned char)ae, 0, + page->beg_addr_p, &fill_data[0]); + qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos, + ustore_size - patt_pos + 1, + &fill_data[page->beg_addr_p]); + } + kfree(fill_data); + return 0; +} + +static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle) +{ + unsigned int i; + int status = 0; + struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; + struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem; + int ae; + + for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) { + if (initmem->num_in_bytes) { + if (qat_uclo_init_ae_memory(handle, initmem)) + return -EINVAL; + } + initmem = (struct icp_qat_uof_initmem *)((unsigned long)( + (unsigned long)initmem + + sizeof(struct icp_qat_uof_initmem)) + + (sizeof(struct icp_qat_uof_memvar_attr) * + initmem->val_attr_num)); + } + for (ae = 0; ae < ICP_QAT_UCLO_MAX_AE; ae++) { + if (qat_hal_batch_wr_lm(handle, ae, + obj_handle->lm_init_tab[ae])) { + pr_err("QAT: fail to batch init lmem for AE %d\n", ae); + return -EINVAL; + } + qat_uclo_cleanup_batch_init_list(handle, + &obj_handle->lm_init_tab[ae]); + qat_uclo_batch_wr_umem(handle, ae, + obj_handle->umem_init_tab[ae]); + qat_uclo_cleanup_batch_init_list(handle, + &obj_handle-> + umem_init_tab[ae]); + } + return status; +} + +static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr, + char *chunk_id, void *cur) +{ + int i; + struct icp_qat_uof_chunkhdr *chunk_hdr = + (struct icp_qat_uof_chunkhdr *) + ((unsigned long)obj_hdr + sizeof(struct icp_qat_uof_objhdr)); + + for (i = 0; i < obj_hdr->num_chunks; i++) { + if ((cur < (void *)&chunk_hdr[i]) && + !(strncmp(chunk_hdr[i].chunk_id, chunk_id, + ICP_QAT_UOF_OBJID_LEN))) { + return &chunk_hdr[i]; + } + } + return NULL; +} + +static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch) +{ + int i; + unsigned int topbit = 1 << 0xF; + unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch); + + reg ^= inbyte << 0x8; + for (i = 0; i < 0x8; i++) { + if (reg & topbit) + reg = (reg << 1) ^ 0x1021; + else + reg <<= 1; + } + return reg & 0xFFFF; +} + +static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num) +{ + unsigned int chksum = 0; + + if (ptr) + while (num--) + chksum = qat_uclo_calc_checksum(chksum, *ptr++); + return chksum; +} + +static struct icp_qat_uclo_objhdr * +qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr, + char *chunk_id) +{ + struct icp_qat_uof_filechunkhdr *file_chunk; + struct icp_qat_uclo_objhdr *obj_hdr; + void 
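+	/* payload of the matching file chunk; its stored checksum is
+	 * verified below before an object header is built around it */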
*chunk; + int i; + + file_chunk = (struct icp_qat_uof_filechunkhdr *) + (buf + sizeof(struct icp_qat_uof_filehdr)); + for (i = 0; i < file_hdr->num_chunks; i++) { + if (!(strncmp(file_chunk->chunk_id, chunk_id, + ICP_QAT_UOF_OBJID_LEN))) { + chunk = buf + file_chunk->offset; + if (file_chunk->checksum != qat_uclo_calc_str_checksum( + (char *)chunk, file_chunk->size)) + break; + obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL); + if (!obj_hdr) + break; + obj_hdr->file_buff = chunk; + obj_hdr->checksum = file_chunk->checksum; + obj_hdr->size = file_chunk->size; + return obj_hdr; + } + file_chunk++; + } + return NULL; +} + +static unsigned int +qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj, + struct icp_qat_uof_image *image) +{ + struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab; + struct icp_qat_uof_objtable *neigh_reg_tab; + struct icp_qat_uof_code_page *code_page; + + code_page = (struct icp_qat_uof_code_page *) + ((char *)image + sizeof(struct icp_qat_uof_image)); + uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + + code_page->uc_var_tab_offset); + imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + + code_page->imp_var_tab_offset); + imp_expr_tab = (struct icp_qat_uof_objtable *) + (encap_uof_obj->beg_uof + + code_page->imp_expr_tab_offset); + if (uc_var_tab->entry_num || imp_var_tab->entry_num || + imp_expr_tab->entry_num) { + pr_err("QAT: UOF can't contain imported variable to be parsed"); + return -EINVAL; + } + neigh_reg_tab = (struct icp_qat_uof_objtable *) + (encap_uof_obj->beg_uof + + code_page->neigh_reg_tab_offset); + if (neigh_reg_tab->entry_num) { + pr_err("QAT: UOF can't contain shared control store feature"); + return -EINVAL; + } + if (image->numpages > 1) { + pr_err("QAT: UOF can't contain multiple pages"); + return -EINVAL; + } + if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) { + pr_err("QAT: UOF can't use shared control store feature"); + return -EFAULT; + } + if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) { + pr_err("QAT: UOF can't use reloadable feature"); + return -EFAULT; + } + return 0; +} + +static void qat_uclo_map_image_pages(struct icp_qat_uof_encap_obj + *encap_uof_obj, + struct icp_qat_uof_image *img, + struct icp_qat_uclo_encap_page *page) +{ + struct icp_qat_uof_code_page *code_page; + struct icp_qat_uof_code_area *code_area; + struct icp_qat_uof_objtable *uword_block_tab; + struct icp_qat_uof_uword_block *uwblock; + int i; + + code_page = (struct icp_qat_uof_code_page *) + ((char *)img + sizeof(struct icp_qat_uof_image)); + page->def_page = code_page->def_page; + page->page_region = code_page->page_region; + page->beg_addr_v = code_page->beg_addr_v; + page->beg_addr_p = code_page->beg_addr_p; + code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof + + code_page->code_area_offset); + page->micro_words_num = code_area->micro_words_num; + uword_block_tab = (struct icp_qat_uof_objtable *) + (encap_uof_obj->beg_uof + + code_area->uword_block_tab); + page->uwblock_num = uword_block_tab->entry_num; + uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab + + sizeof(struct icp_qat_uof_objtable)); + page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock; + for (i = 0; i < uword_block_tab->entry_num; i++) + page->uwblock[i].micro_words = + (unsigned long)encap_uof_obj->beg_uof + uwblock[i].uword_offset; +} + +static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle, + struct icp_qat_uclo_encapme *ae_uimage, + int max_image) +{ + int 
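+	/* 'a' counts the images actually mapped and is returned to the
+	 * caller; a return value of 0 is treated as failure */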
a = 0, i; + struct icp_qat_uof_chunkhdr *chunk_hdr = NULL; + struct icp_qat_uof_image *image; + struct icp_qat_uof_objtable *ae_regtab; + struct icp_qat_uof_objtable *init_reg_sym_tab; + struct icp_qat_uof_objtable *sbreak_tab; + struct icp_qat_uof_encap_obj *encap_uof_obj = + &obj_handle->encap_uof_obj; + + for (a = 0; a < max_image; a++) { + chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr, + ICP_QAT_UOF_IMAG, chunk_hdr); + if (!chunk_hdr) + break; + image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof + + chunk_hdr->offset); + ae_regtab = (struct icp_qat_uof_objtable *) + (image->reg_tab_offset + + obj_handle->obj_hdr->file_buff); + ae_uimage[a].ae_reg_num = ae_regtab->entry_num; + ae_uimage[a].ae_reg = (struct icp_qat_uof_ae_reg *) + (((char *)ae_regtab) + + sizeof(struct icp_qat_uof_objtable)); + init_reg_sym_tab = (struct icp_qat_uof_objtable *) + (image->init_reg_sym_tab + + obj_handle->obj_hdr->file_buff); + ae_uimage[a].init_regsym_num = init_reg_sym_tab->entry_num; + ae_uimage[a].init_regsym = (struct icp_qat_uof_init_regsym *) + (((char *)init_reg_sym_tab) + + sizeof(struct icp_qat_uof_objtable)); + sbreak_tab = (struct icp_qat_uof_objtable *) + (image->sbreak_tab + obj_handle->obj_hdr->file_buff); + ae_uimage[a].sbreak_num = sbreak_tab->entry_num; + ae_uimage[a].sbreak = (struct icp_qat_uof_sbreak *) + (((char *)sbreak_tab) + + sizeof(struct icp_qat_uof_objtable)); + ae_uimage[a].img_ptr = image; + if (qat_uclo_check_image_compat(encap_uof_obj, image)) + goto out_err; + ae_uimage[a].page = + kzalloc(sizeof(struct icp_qat_uclo_encap_page), + GFP_KERNEL); + if (!ae_uimage[a].page) + goto out_err; + qat_uclo_map_image_pages(encap_uof_obj, image, + ae_uimage[a].page); + } + return a; +out_err: + for (i = 0; i < a; i++) + kfree(ae_uimage[i].page); + return 0; +} + +static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae) +{ + int i, ae; + int mflag = 0; + struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; + + for (ae = 0; ae <= max_ae; ae++) { + if (!test_bit(ae, (unsigned long *) + &(handle->hal_handle->ae_mask))) + continue; + for (i = 0; i < obj_handle->uimage_num; i++) { + if (!test_bit(ae, (unsigned long *) + &(obj_handle->ae_uimage[i].img_ptr->ae_assigned))) + continue; + mflag = 1; + if (qat_uclo_init_ae_data(obj_handle, ae, i)) + return -EINVAL; + } + } + if (!mflag) { + pr_err("QAT: uimage uses AE not set"); + return -EINVAL; + } + return 0; +} + +static struct icp_qat_uof_strtable * +qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr, + char *tab_name, struct icp_qat_uof_strtable *str_table) +{ + struct icp_qat_uof_chunkhdr *chunk_hdr; + + chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *) + obj_hdr->file_buff, tab_name, NULL); + if (chunk_hdr) { + int hdr_size; + memcpy(&str_table->table_len, obj_hdr->file_buff + + chunk_hdr->offset, sizeof(str_table->table_len)); + hdr_size = (char *)&str_table->strings - (char *)str_table; + str_table->strings = (unsigned long)obj_hdr->file_buff + + chunk_hdr->offset + hdr_size; + return str_table; + } + return NULL; +} + +static void +qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj, + struct icp_qat_uclo_init_mem_table *init_mem_tab) +{ + struct icp_qat_uof_chunkhdr *chunk_hdr; + + chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr, + ICP_QAT_UOF_IMEM, NULL); + if (chunk_hdr) { + memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof + + chunk_hdr->offset, sizeof(unsigned int)); + init_mem_tab->init_mem = (struct icp_qat_uof_initmem *) + 
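+		/* the entry array starts immediately after the 32-bit
+		 * entry count at the head of the IMEM chunk */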
(encap_uof_obj->beg_uof + chunk_hdr->offset + + sizeof(unsigned int)); + } +} + +static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle) +{ + unsigned int maj_ver, prod_type = obj_handle->prod_type; + + if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->cpu_type)) { + pr_err("QAT: uof type 0x%x not match with cur platform 0x%x\n", + obj_handle->encap_uof_obj.obj_hdr->cpu_type, prod_type); + return -EINVAL; + } + maj_ver = obj_handle->prod_rev & 0xff; + if ((obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver) || + (obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver)) { + pr_err("QAT: uof majVer 0x%x out of range\n", maj_ver); + return -EINVAL; + } + return 0; +} + +static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx_mask, + enum icp_qat_uof_regtype reg_type, + unsigned short reg_addr, unsigned int value) +{ + switch (reg_type) { + case ICP_GPA_ABS: + case ICP_GPB_ABS: + ctx_mask = 0; + case ICP_GPA_REL: + case ICP_GPB_REL: + return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type, + reg_addr, value); + case ICP_SR_ABS: + case ICP_DR_ABS: + case ICP_SR_RD_ABS: + case ICP_DR_RD_ABS: + ctx_mask = 0; + case ICP_SR_REL: + case ICP_DR_REL: + case ICP_SR_RD_REL: + case ICP_DR_RD_REL: + return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type, + reg_addr, value); + case ICP_SR_WR_ABS: + case ICP_DR_WR_ABS: + ctx_mask = 0; + case ICP_SR_WR_REL: + case ICP_DR_WR_REL: + return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type, + reg_addr, value); + case ICP_NEIGH_REL: + return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value); + default: + pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type); + return -EFAULT; + } + return 0; +} + +static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle, + unsigned int ae, + struct icp_qat_uclo_encapme *encap_ae) +{ + unsigned int i; + unsigned char ctx_mask; + struct icp_qat_uof_init_regsym *init_regsym; + + if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) == + ICP_QAT_UCLO_MAX_CTX) + ctx_mask = 0xff; + else + ctx_mask = 0x55; + + for (i = 0; i < encap_ae->init_regsym_num; i++) { + unsigned int exp_res; + init_regsym = &encap_ae->init_regsym[i]; + exp_res = init_regsym->value; + switch (init_regsym->init_type) { + case ICP_QAT_UOF_INIT_REG: + qat_uclo_init_reg(handle, ae, ctx_mask, + (enum icp_qat_uof_regtype) + init_regsym->reg_type, + (unsigned short)init_regsym->reg_addr, + exp_res); + break; + case ICP_QAT_UOF_INIT_REG_CTX: + /* check if ctx is appropriate for the ctxMode */ + if (!((1 << init_regsym->ctx) & ctx_mask)) { + pr_err("QAT: invalid ctx num = 0x%x\n", + init_regsym->ctx); + return -EINVAL; + } + qat_uclo_init_reg(handle, ae, + (unsigned char) + (1 << init_regsym->ctx), + (enum icp_qat_uof_regtype) + init_regsym->reg_type, + (unsigned short)init_regsym->reg_addr, + exp_res); + break; + case ICP_QAT_UOF_INIT_EXPR: + pr_err("QAT: INIT_EXPR feature not supported\n"); + return -EINVAL; + case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP: + pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n"); + return -EINVAL; + default: + break; + } + } + return 0; +} + +static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle) +{ + struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; + unsigned int s, ae; + + if (obj_handle->global_inited) + return 0; + if (obj_handle->init_mem_tab.entry_num) { + if (qat_uclo_init_memory(handle)) { + pr_err("QAT: initalize memory failed\n"); + return -EINVAL; + } + } + for (ae = 0; ae <= 
handle->hal_handle->ae_max_num; ae++) { + for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) { + if (!obj_handle->ae_data[ae].ae_slices[s].encap_image) + continue; + if (qat_uclo_init_reg_sym(handle, ae, + obj_handle->ae_data[ae]. + ae_slices[s].encap_image)) + return -EINVAL; + } + } + obj_handle->global_inited = 1; + return 0; +} + +static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle) +{ + unsigned char ae, nn_mode, s; + struct icp_qat_uof_image *uof_image; + struct icp_qat_uclo_aedata *ae_data; + struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; + + for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) { + if (!test_bit(ae, + (unsigned long *)&(handle->hal_handle->ae_mask))) + continue; + ae_data = &(obj_handle->ae_data[ae]); + for (s = 0; s < ae_data->slice_num && s < ICP_QAT_UCLO_MAX_CTX; + s++) { + if (!(obj_handle->ae_data[ae].ae_slices[s].encap_image)) + continue; + uof_image = ae_data->ae_slices[s].encap_image->img_ptr; + if (qat_hal_set_ae_ctx_mode(handle, ae, + (char)ICP_QAT_CTX_MODE + (uof_image->ae_mode))) { + pr_err("QAT: qat_hal_set_ae_ctx_mode error\n"); + return -EFAULT; + } + nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode); + if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) { + pr_err("QAT: qat_hal_set_ae_nn_mode error\n"); + return -EFAULT; + } + if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, + (char)ICP_QAT_LOC_MEM0_MODE + (uof_image->ae_mode))) { + pr_err("QAT: qat_hal_set_ae_lm_mode error\n "); + return -EFAULT; + } + if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, + (char)ICP_QAT_LOC_MEM1_MODE + (uof_image->ae_mode))) { + pr_err("QAT: qat_hal_set_ae_lm_mode error\n "); + return -EFAULT; + } + } + } + return 0; +} + +static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle) +{ + struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; + struct icp_qat_uclo_encapme *image; + int a; + + for (a = 0; a < obj_handle->uimage_num; a++) { + image = &obj_handle->ae_uimage[a]; + image->uwords_num = image->page->beg_addr_p + + image->page->micro_words_num; + } +} + +static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) +{ + struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; + unsigned int ae; + + obj_handle->uword_buf = kzalloc(UWORD_CPYBUF_SIZE * sizeof(uint64_t), + GFP_KERNEL); + if (!obj_handle->uword_buf) + return -ENOMEM; + obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff; + obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *) + obj_handle->obj_hdr->file_buff; + obj_handle->encap_uof_obj.chunk_hdr = (struct icp_qat_uof_chunkhdr *) + obj_handle->obj_hdr->file_buff + sizeof(struct icp_qat_uof_objhdr); + obj_handle->uword_in_bytes = 6; + obj_handle->prod_type = ICP_QAT_AC_C_CPU_TYPE; + obj_handle->prod_rev = PID_MAJOR_REV | + (PID_MINOR_REV & handle->hal_handle->revision_id); + if (qat_uclo_check_uof_compat(obj_handle)) { + pr_err("QAT: uof incompatible\n "); + return -EINVAL; + } + obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE; + if (!(obj_handle->obj_hdr->file_buff) || + !(qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT, + &(obj_handle->str_table)))) { + pr_err("QAT: uof doesn't have effective images"); + goto out_err; + } + obj_handle->uimage_num = + qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage, + ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX); + if (!obj_handle->uimage_num) + goto out_err; + if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) { + pr_err("QAT: Bad object\n "); + goto 
out_check_uof_aemask_err; + } + qat_uclo_init_uword_num(handle); + qat_uclo_map_initmem_table(&(obj_handle->encap_uof_obj), + &(obj_handle->init_mem_tab)); + if (qat_uclo_set_ae_mode(handle)) + goto out_check_uof_aemask_err; + return 0; +out_check_uof_aemask_err: + for (ae = 0; ae < obj_handle->uimage_num; ae++) + kfree(obj_handle->ae_uimage[ae].page); +out_err: + kfree(obj_handle->uword_buf); + return -EFAULT; +} + +int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, + void *addr_ptr, int mem_size) +{ + struct icp_qat_uof_filehdr *filehdr; + struct icp_qat_uclo_objhandle *objhdl; + + BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >= + (sizeof(handle->hal_handle->ae_mask) * 8)); + + if (!handle || !addr_ptr || mem_size < 24) + return -EINVAL; + objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL); + if (!objhdl) + return -ENOMEM; + objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL); + if (!objhdl->obj_buf) + goto out_objbuf_err; + filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf; + if (qat_uclo_check_format(filehdr)) + goto out_objhdr_err; + objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr, + ICP_QAT_UOF_OBJS); + if (!objhdl->obj_hdr) { + pr_err("QAT: object file chunk is null\n"); + goto out_objhdr_err; + } + handle->obj_handle = objhdl; + if (qat_uclo_parse_uof_obj(handle)) + goto out_overlay_obj_err; + return 0; + +out_overlay_obj_err: + handle->obj_handle = NULL; + kfree(objhdl->obj_hdr); +out_objhdr_err: + kfree(objhdl->obj_buf); +out_objbuf_err: + kfree(objhdl); + return -ENOMEM; +} + +int qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle) +{ + struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; + int a; + + kfree(obj_handle->uword_buf); + for (a = 0; a < obj_handle->uimage_num; a++) + kfree(obj_handle->ae_uimage[a].page); + + for (a = 0; a <= (int)handle->hal_handle->ae_max_num; a++) + qat_uclo_free_ae_data(&obj_handle->ae_data[a]); + kfree(obj_handle->obj_hdr); + + kfree(obj_handle->obj_buf); + kfree(obj_handle); + handle->obj_handle = NULL; + return 0; +} + +static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle, + struct icp_qat_uclo_encap_page *encap_page, + uint64_t *uword, unsigned int addr_p, + unsigned int raddr, uint64_t fill) +{ + uint64_t uwrd = 0; + unsigned int i; + + if (!encap_page) { + *uword = fill; + return; + } + for (i = 0; i < encap_page->uwblock_num; i++) { + if (raddr >= encap_page->uwblock[i].start_addr && + raddr <= encap_page->uwblock[i].start_addr + + encap_page->uwblock[i].words_num - 1) { + raddr -= encap_page->uwblock[i].start_addr; + raddr *= obj_handle->uword_in_bytes; + memcpy(&uwrd, (void *)(((unsigned long) + encap_page->uwblock[i].micro_words) + raddr), + obj_handle->uword_in_bytes); + uwrd = uwrd & 0xbffffffffffull; + } + } + *uword = uwrd; + if (*uword == INVLD_UWORD) + *uword = fill; +} + +static int qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_uclo_encap_page + *encap_page, unsigned int ae) +{ + unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen; + struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; + uint64_t fill_pat; + int status = 0; + + /* load the page starting at appropriate ustore address */ + /* get fill-pattern from an image -- they are all the same */ + memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern, + sizeof(uint64_t)); + uw_physical_addr = encap_page->beg_addr_p; + uw_relative_addr = 0; + words_num = encap_page->micro_words_num; + while (words_num) { + if (words_num < 
UWORD_CPYBUF_SIZE)
+			cpylen = words_num;
+		else
+			cpylen = UWORD_CPYBUF_SIZE;
+
+		/* load the buffer */
+		for (i = 0; i < cpylen; i++)
+			qat_uclo_fill_uwords(obj_handle, encap_page,
+					     &obj_handle->uword_buf[i],
+					     uw_physical_addr + i,
+					     uw_relative_addr + i, fill_pat);
+
+		/* copy the buffer to ustore */
+		qat_hal_wr_uwords(handle, (unsigned char)ae,
+				  uw_physical_addr, cpylen,
+				  obj_handle->uword_buf);
+
+		uw_physical_addr += cpylen;
+		uw_relative_addr += cpylen;
+		words_num -= cpylen;
+	}
+	return status;
+}
+
+static int
+qat_uclo_wr_uimage_pages(struct icp_qat_fw_loader_handle *handle,
+			 struct icp_qat_uof_image *image)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	unsigned int ctx_mask, s;
+	struct icp_qat_uclo_page *page;
+	unsigned char ae;
+	int retval = 0;
+	int ctx;
+
+	if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
+		ctx_mask = 0xff;
+	else
+		ctx_mask = 0x55;
+	/* load the default page and set assigned CTX PC
+	 * to the entrypoint address */
+	for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
+		if (!test_bit(ae, (unsigned long *)&(image->ae_assigned)))
+			continue;
+		/* find the slice to which this image is assigned */
+		for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
+			if (image->ctx_assigned & obj_handle->ae_data[ae].
+			    ae_slices[s].ctx_mask_assigned)
+				break;
+		}
+		if (s >= obj_handle->ae_data[ae].slice_num)
+			continue;
+		page = obj_handle->ae_data[ae].ae_slices[s].page;
+		if (!page->encap_page->def_page)
+			continue;
+		if (qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae))
+			return -EINVAL;
+
+		page = obj_handle->ae_data[ae].ae_slices[s].page;
+		for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
+			obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] =
+				(ctx_mask & (1 << ctx)) ? page : NULL;
+		qat_hal_set_live_ctx(handle, (unsigned char)ae,
+				     image->ctx_assigned);
+		qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
+			       image->entry_address);
+	}
+	return retval;
+}
+
+int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	unsigned int i;
+
+	if (qat_uclo_init_globals(handle))
+		return -EINVAL;
+	for (i = 0; i < obj_handle->uimage_num; i++) {
+		if (!(obj_handle->ae_uimage[i].img_ptr))
+			return -EINVAL;
+		if (qat_uclo_init_ustore(handle, &(obj_handle->ae_uimage[i])))
+			return -EINVAL;
+		if (qat_uclo_wr_uimage_pages(handle,
+					     obj_handle->ae_uimage[i].img_ptr))
+			return -EINVAL;
+	}
+	return 0;
+}
-- cgit v1.2.3

From b3416fb8a2f5a1653e4ab72b55907794cc10bde1 Mon Sep 17 00:00:00 2001
From: Tadeusz Struk
Date: Thu, 5 Jun 2014 13:44:04 -0700
Subject: crypto: qat - Intel(R) QAT accelengine part of fw loader

This patch adds the acceleration engine handler part of the firmware
loader.

Acked-by: Bo Cui
Reviewed-by: Bruce W.
Allan Signed-off-by: Karen Xiang Signed-off-by: Pingchaox Yang Signed-off-by: Herbert Xu --- .../qat/qat_common/icp_qat_fw_loader_handle.h | 78 ++ drivers/crypto/qat/qat_common/icp_qat_hal.h | 125 ++ drivers/crypto/qat/qat_common/qat_hal.c | 1402 ++++++++++++++++++++ 3 files changed, 1605 insertions(+) create mode 100644 drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h create mode 100644 drivers/crypto/qat/qat_common/icp_qat_hal.h create mode 100644 drivers/crypto/qat/qat_common/qat_hal.c (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h new file mode 100644 index 000000000000..5e1aa40c0404 --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h @@ -0,0 +1,78 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#ifndef __ICP_QAT_FW_LOADER_HANDLE_H__ +#define __ICP_QAT_FW_LOADER_HANDLE_H__ +#include "icp_qat_uclo.h" + +struct icp_qat_fw_loader_ae_data { + unsigned int state; + unsigned int ustore_size; + unsigned int free_addr; + unsigned int free_size; + unsigned int live_ctx_mask; +}; + +struct icp_qat_fw_loader_hal_handle { + struct icp_qat_fw_loader_ae_data aes[ICP_QAT_UCLO_MAX_AE]; + unsigned int ae_mask; + unsigned int slice_mask; + unsigned int revision_id; + unsigned int ae_max_num; + unsigned int upc_mask; + unsigned int max_ustore; +}; + +struct icp_qat_fw_loader_handle { + struct icp_qat_fw_loader_hal_handle *hal_handle; + void *obj_handle; + void __iomem *hal_sram_addr_v; + void __iomem *hal_cap_g_ctl_csr_addr_v; + void __iomem *hal_cap_ae_xfer_csr_addr_v; + void __iomem *hal_cap_ae_local_csr_addr_v; + void __iomem *hal_ep_csr_addr_v; +}; +#endif diff --git a/drivers/crypto/qat/qat_common/icp_qat_hal.h b/drivers/crypto/qat/qat_common/icp_qat_hal.h new file mode 100644 index 000000000000..85b6d241ea82 --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_hal.h @@ -0,0 +1,125 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#ifndef __ICP_QAT_HAL_H +#define __ICP_QAT_HAL_H +#include "icp_qat_fw_loader_handle.h" + +enum hal_global_csr { + MISC_CONTROL = 0x04, + ICP_RESET = 0x0c, + ICP_GLOBAL_CLK_ENABLE = 0x50 +}; + +enum hal_ae_csr { + USTORE_ADDRESS = 0x000, + USTORE_DATA_LOWER = 0x004, + USTORE_DATA_UPPER = 0x008, + ALU_OUT = 0x010, + CTX_ARB_CNTL = 0x014, + CTX_ENABLES = 0x018, + CC_ENABLE = 0x01c, + CSR_CTX_POINTER = 0x020, + CTX_STS_INDIRECT = 0x040, + ACTIVE_CTX_STATUS = 0x044, + CTX_SIG_EVENTS_INDIRECT = 0x048, + CTX_SIG_EVENTS_ACTIVE = 0x04c, + CTX_WAKEUP_EVENTS_INDIRECT = 0x050, + LM_ADDR_0_INDIRECT = 0x060, + LM_ADDR_1_INDIRECT = 0x068, + INDIRECT_LM_ADDR_0_BYTE_INDEX = 0x0e0, + INDIRECT_LM_ADDR_1_BYTE_INDEX = 0x0e8, + FUTURE_COUNT_SIGNAL_INDIRECT = 0x078, + TIMESTAMP_LOW = 0x0c0, + TIMESTAMP_HIGH = 0x0c4, + PROFILE_COUNT = 0x144, + SIGNATURE_ENABLE = 0x150, + AE_MISC_CONTROL = 0x160, + LOCAL_CSR_STATUS = 0x180, +}; + +#define UA_ECS (0x1 << 31) +#define ACS_ABO_BITPOS 31 +#define ACS_ACNO 0x7 +#define CE_ENABLE_BITPOS 0x8 +#define CE_LMADDR_0_GLOBAL_BITPOS 16 +#define CE_LMADDR_1_GLOBAL_BITPOS 17 +#define CE_NN_MODE_BITPOS 20 +#define CE_REG_PAR_ERR_BITPOS 25 +#define CE_BREAKPOINT_BITPOS 27 +#define CE_CNTL_STORE_PARITY_ERROR_BITPOS 29 +#define CE_INUSE_CONTEXTS_BITPOS 31 +#define CE_NN_MODE (0x1 << CE_NN_MODE_BITPOS) +#define CE_INUSE_CONTEXTS (0x1 << CE_INUSE_CONTEXTS_BITPOS) +#define XCWE_VOLUNTARY (0x1) +#define LCS_STATUS (0x1) +#define MMC_SHARE_CS_BITPOS 2 +#define GLOBAL_CSR 0xA00 + +#define SET_CAP_CSR(handle, csr, val) \ + ADF_CSR_WR(handle->hal_cap_g_ctl_csr_addr_v, csr, val) +#define GET_CAP_CSR(handle, csr) \ + ADF_CSR_RD(handle->hal_cap_g_ctl_csr_addr_v, csr) +#define SET_GLB_CSR(handle, csr, val) SET_CAP_CSR(handle, csr + GLOBAL_CSR, val) +#define GET_GLB_CSR(handle, csr) GET_CAP_CSR(handle, GLOBAL_CSR + csr) +#define AE_CSR(handle, ae) \ + (handle->hal_cap_ae_local_csr_addr_v + \ + ((ae & handle->hal_handle->ae_mask) << 12)) +#define AE_CSR_ADDR(handle, ae, csr) (AE_CSR(handle, ae) + (0x3ff & csr)) +#define SET_AE_CSR(handle, ae, csr, val) \ + ADF_CSR_WR(AE_CSR_ADDR(handle, ae, csr), 0, val) +#define GET_AE_CSR(handle, ae, csr) ADF_CSR_RD(AE_CSR_ADDR(handle, ae, csr), 0) +#define AE_XFER(handle, ae) \ + (handle->hal_cap_ae_xfer_csr_addr_v + \ + ((ae & handle->hal_handle->ae_mask) << 12)) +#define AE_XFER_ADDR(handle, ae, reg) (AE_XFER(handle, ae) + \ + ((reg & 0xff) << 2)) +#define SET_AE_XFER(handle, ae, reg, val) \ + ADF_CSR_WR(AE_XFER_ADDR(handle, ae, reg), 0, val) +#define SRAM_WRITE(handle, addr, val) \ + ADF_CSR_WR(handle->hal_sram_addr_v, addr, val) +#define SRAM_READ(handle, addr) ADF_CSR_RD(handle->hal_sram_addr_v, addr) +#endif diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c new file mode 100644 index 000000000000..260cd7141e74 --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_hal.c @@ -0,0 +1,1402 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include + +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "icp_qat_hal.h" +#include "icp_qat_uclo.h" + +#define BAD_REGADDR 0xffff +#define MAX_RETRY_TIMES 10000 +#define INIT_CTX_ARB_VALUE 0x0 +#define INIT_CTX_ENABLE_VALUE 0x0 +#define INIT_PC_VALUE 0x0 +#define INIT_WAKEUP_EVENTS_VALUE 0x1 +#define INIT_SIG_EVENTS_VALUE 0x1 +#define INIT_CCENABLE_VALUE 0x2000 +#define RST_CSR_QAT_LSB 20 +#define RST_CSR_AE_LSB 0 +#define MC_TIMESTAMP_ENABLE (0x1 << 7) + +#define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \ + (~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \ + (~(1 << CE_REG_PAR_ERR_BITPOS))) +#define INSERT_IMMED_GPRA_CONST(inst, const_val) \ + (inst = ((inst & 0xFFFF00C03FFull) | \ + ((((const_val) << 12) & 0x0FF00000ull) | \ + (((const_val) << 10) & 0x0003FC00ull)))) +#define INSERT_IMMED_GPRB_CONST(inst, const_val) \ + (inst = ((inst & 0xFFFF00FFF00ull) | \ + ((((const_val) << 12) & 0x0FF00000ull) | \ + (((const_val) << 0) & 0x000000FFull)))) + +#define AE(handle, ae) handle->hal_handle->aes[ae] + +static const uint64_t inst_4b[] = { + 0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull, + 0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, + 0x0A021000000ull +}; + +static const uint64_t inst[] = { + 0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull, + 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, + 0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull, + 0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, + 0x0F000400300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D804400101ull, + 0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull, + 0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull, + 0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull, + 0x0A058CC0000ull, 0x0A058DC0000ull, 
0x0A058EC0000ull, 0x0A058FC0000ull, + 0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull, + 0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull, + 0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull, + 0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull, + 0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull, + 0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull, + 0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull, + 0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull, + 0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull, + 0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull, + 0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull, + 0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull, + 0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull, +}; + +void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int ctx_mask) +{ + AE(handle, ae).live_ctx_mask = ctx_mask; +} + +#define CSR_RETRY_TIMES 500 +static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int csr, + unsigned int *value) +{ + unsigned int iterations = CSR_RETRY_TIMES; + + do { + *value = GET_AE_CSR(handle, ae, csr); + if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS)) + return 0; + } while (iterations--); + + pr_err("QAT: Read CSR timeout\n"); + return -EFAULT; +} + +static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int csr, + unsigned int value) +{ + unsigned int iterations = CSR_RETRY_TIMES; + + do { + SET_AE_CSR(handle, ae, csr, value); + if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS)) + return 0; + } while (iterations--); + + pr_err("QAT: Write CSR Timeout\n"); + return -EFAULT; +} + +static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx, + unsigned int *events) +{ + unsigned int cur_ctx; + + qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); + qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); + qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events); + qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); +} + +static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int cycles, + int chk_inactive) +{ + unsigned int base_cnt = 0, cur_cnt = 0; + unsigned int csr = (1 << ACS_ABO_BITPOS); + int times = MAX_RETRY_TIMES; + int elapsed_cycles = 0; + + qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt); + base_cnt &= 0xffff; + while ((int)cycles > elapsed_cycles && times--) { + if (chk_inactive) + qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &csr); + + qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt); + cur_cnt &= 0xffff; + elapsed_cycles = cur_cnt - base_cnt; + + if (elapsed_cycles < 0) + elapsed_cycles += 0x10000; + + /* ensure at least 8 time cycles elapsed in wait_cycles */ + if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS))) + return 0; + } + if (!times) { + pr_err("QAT: wait_num_cycles time out\n"); + return -EFAULT; + } + return 0; +} + +#define CLR_BIT(wrd, bit) (wrd & ~(1 << bit)) +#define SET_BIT(wrd, bit) (wrd | 1 << bit) + +int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char mode) +{ + unsigned int csr, new_csr; + + if ((mode != 4) && (mode 
!= 8)) { + pr_err("QAT: bad ctx mode=%d\n", mode); + return -EINVAL; + } + + /* Sets the accelaration engine context mode to either four or eight */ + qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr); + csr = IGNORE_W1C_MASK & csr; + new_csr = (mode == 4) ? + SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) : + CLR_BIT(csr, CE_INUSE_CONTEXTS_BITPOS); + qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr); + return 0; +} + +int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char mode) +{ + unsigned int csr, new_csr; + + qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr); + csr &= IGNORE_W1C_MASK; + + new_csr = (mode) ? + SET_BIT(csr, CE_NN_MODE_BITPOS) : + CLR_BIT(csr, CE_NN_MODE_BITPOS); + + if (new_csr != csr) + qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr); + + return 0; +} + +int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, enum icp_qat_uof_regtype lm_type, + unsigned char mode) +{ + unsigned int csr, new_csr; + + qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr); + csr &= IGNORE_W1C_MASK; + switch (lm_type) { + case ICP_LMEM0: + new_csr = (mode) ? + SET_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS) : + CLR_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS); + break; + case ICP_LMEM1: + new_csr = (mode) ? + SET_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS) : + CLR_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS); + break; + default: + pr_err("QAT: lmType = 0x%x\n", lm_type); + return -EINVAL; + } + + if (new_csr != csr) + qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr); + return 0; +} + +static unsigned short qat_hal_get_reg_addr(unsigned int type, + unsigned short reg_num) +{ + unsigned short reg_addr; + switch (type) { + case ICP_GPA_ABS: + case ICP_GPB_ABS: + reg_addr = 0x80 | (reg_num & 0x7f); + break; + case ICP_GPA_REL: + case ICP_GPB_REL: + reg_addr = reg_num & 0x1f; + break; + case ICP_SR_RD_REL: + case ICP_SR_WR_REL: + case ICP_SR_REL: + reg_addr = 0x180 | (reg_num & 0x1f); + break; + case ICP_SR_ABS: + reg_addr = 0x140 | ((reg_num & 0x3) << 1); + break; + case ICP_DR_RD_REL: + case ICP_DR_WR_REL: + case ICP_DR_REL: + reg_addr = 0x1c0 | (reg_num & 0x1f); + break; + case ICP_DR_ABS: + reg_addr = 0x100 | ((reg_num & 0x3) << 1); + break; + case ICP_NEIGH_REL: + reg_addr = 0x280 | (reg_num & 0x1f); + break; + case ICP_LMEM0: + reg_addr = 0x200; + break; + case ICP_LMEM1: + reg_addr = 0x220; + break; + case ICP_NO_DEST: + reg_addr = 0x300 | (reg_num & 0xff); + break; + default: + reg_addr = BAD_REGADDR; + break; + } + return reg_addr; +} + +void qat_hal_reset(struct icp_qat_fw_loader_handle *handle) +{ + unsigned int ae_reset_csr; + + ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET); + ae_reset_csr |= handle->hal_handle->ae_mask << RST_CSR_AE_LSB; + ae_reset_csr |= handle->hal_handle->slice_mask << RST_CSR_QAT_LSB; + SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr); +} + +static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int ctx_mask, + unsigned int ae_csr, unsigned int csr_val) +{ + unsigned int ctx, cur_ctx; + + qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); + + for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { + if (!(ctx_mask & (1 << ctx))) + continue; + qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); + qat_hal_wr_ae_csr(handle, ae, ae_csr, csr_val); + } + + qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); +} + +static void qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx, + unsigned int ae_csr, unsigned int *csr_val) +{ + 
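
qat_hal_wr_indr_csr() above and qat_hal_rd_indr_csr() here share one idiom: per-context CSRs are reached by saving CSR_CTX_POINTER, pointing it at the target context, performing the access, and restoring the old pointer so unrelated callers see no side effect. A minimal sketch of that save/select/restore pattern against a fake banked register file (all names invented):

#include <stdio.h>

/* Stand-ins for the hardware: a context-pointer register plus one
 * banked register per context. Purely illustrative. */
static unsigned int ctx_pointer;
static unsigned int banked_reg[8];

static void wr_banked(unsigned int ctx, unsigned int val)
{
        unsigned int saved = ctx_pointer;       /* save */

        ctx_pointer = ctx;                      /* select */
        banked_reg[ctx_pointer] = val;          /* access */
        ctx_pointer = saved;                    /* restore */
}

int main(void)
{
        wr_banked(5, 0xdead);
        printf("ctx5=0x%x ptr=%u\n", banked_reg[5], ctx_pointer);
        return 0;
}
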
unsigned int cur_ctx; + + qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); + qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); + qat_hal_rd_ae_csr(handle, ae, ae_csr, csr_val); + qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); +} + +static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int ctx_mask, + unsigned int events) +{ + unsigned int ctx, cur_ctx; + + qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); + for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { + if (!(ctx_mask & (1 << ctx))) + continue; + qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); + qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_INDIRECT, events); + } + qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); +} + +static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int ctx_mask, + unsigned int events) +{ + unsigned int ctx, cur_ctx; + + qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); + for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { + if (!(ctx_mask & (1 << ctx))) + continue; + qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); + qat_hal_wr_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, + events); + } + qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); +} + +static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle) +{ + unsigned int base_cnt, cur_cnt; + unsigned char ae; + unsigned int times = MAX_RETRY_TIMES; + + for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { + if (!(handle->hal_handle->ae_mask & (1 << ae))) + continue; + + qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, + (unsigned int *)&base_cnt); + base_cnt &= 0xffff; + + do { + qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, + (unsigned int *)&cur_cnt); + cur_cnt &= 0xffff; + } while (times-- && (cur_cnt == base_cnt)); + + if (!times) { + pr_err("QAT: AE%d is inactive!!\n", ae); + return -EFAULT; + } + } + + return 0; +} + +static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle) +{ + unsigned int misc_ctl; + unsigned char ae; + + /* stop the timestamp timers */ + misc_ctl = GET_GLB_CSR(handle, MISC_CONTROL); + if (misc_ctl & MC_TIMESTAMP_ENABLE) + SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl & + (~MC_TIMESTAMP_ENABLE)); + + for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) { + if (!(handle->hal_handle->ae_mask & (1 << ae))) + continue; + qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0); + qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0); + } + /* start timestamp timers */ + SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl | MC_TIMESTAMP_ENABLE); +} + +#define ESRAM_AUTO_TINIT (1<<2) +#define ESRAM_AUTO_TINIT_DONE (1<<3) +#define ESRAM_AUTO_INIT_USED_CYCLES (1640) +#define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C +static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle) +{ + void __iomem *csr_addr = handle->hal_ep_csr_addr_v + + ESRAM_AUTO_INIT_CSR_OFFSET; + unsigned int csr_val, times = 30; + + csr_val = ADF_CSR_RD(csr_addr, 0); + if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE)) + return 0; + + csr_val = ADF_CSR_RD(csr_addr, 0); + csr_val |= ESRAM_AUTO_TINIT; + ADF_CSR_WR(csr_addr, 0, csr_val); + + do { + qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0); + csr_val = ADF_CSR_RD(csr_addr, 0); + } while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--); + if ((!times)) { + pr_err("QAT: Fail to init eSram!\n"); + return -EFAULT; + } + return 0; +} + +#define SHRAM_INIT_CYCLES 2060 +int qat_hal_clr_reset(struct 
icp_qat_fw_loader_handle *handle) +{ + unsigned int ae_reset_csr; + unsigned char ae; + unsigned int clk_csr; + unsigned int times = 100; + unsigned int csr; + + /* write to the reset csr */ + ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET); + ae_reset_csr &= ~(handle->hal_handle->ae_mask << RST_CSR_AE_LSB); + ae_reset_csr &= ~(handle->hal_handle->slice_mask << RST_CSR_QAT_LSB); + do { + SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr); + if (!(times--)) + goto out_err; + csr = GET_GLB_CSR(handle, ICP_RESET); + } while ((handle->hal_handle->ae_mask | + (handle->hal_handle->slice_mask << RST_CSR_QAT_LSB)) & csr); + /* enable clock */ + clk_csr = GET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE); + clk_csr |= handle->hal_handle->ae_mask << 0; + clk_csr |= handle->hal_handle->slice_mask << 20; + SET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE, clk_csr); + if (qat_hal_check_ae_alive(handle)) + goto out_err; + + /* Set undefined power-up/reset states to reasonable default values */ + for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) { + if (!(handle->hal_handle->ae_mask & (1 << ae))) + continue; + qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, + INIT_CTX_ENABLE_VALUE); + qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX, + CTX_STS_INDIRECT, + handle->hal_handle->upc_mask & + INIT_PC_VALUE); + qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE); + qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE); + qat_hal_put_wakeup_event(handle, ae, + ICP_QAT_UCLO_AE_ALL_CTX, + INIT_WAKEUP_EVENTS_VALUE); + qat_hal_put_sig_event(handle, ae, + ICP_QAT_UCLO_AE_ALL_CTX, + INIT_SIG_EVENTS_VALUE); + } + if (qat_hal_init_esram(handle)) + goto out_err; + if (qat_hal_wait_cycles(handle, 0, SHRAM_INIT_CYCLES, 0)) + goto out_err; + qat_hal_reset_timestamp(handle); + + return 0; +out_err: + pr_err("QAT: failed to get device out of reset\n"); + return -EFAULT; +} + +static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int ctx_mask) +{ + unsigned int ctx; + + qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx); + ctx &= IGNORE_W1C_MASK & + (~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS)); + qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx); +} + +static uint64_t qat_hal_parity_64bit(uint64_t word) +{ + word ^= word >> 1; + word ^= word >> 2; + word ^= word >> 4; + word ^= word >> 8; + word ^= word >> 16; + word ^= word >> 32; + return word & 1; +} + +static uint64_t qat_hal_set_uword_ecc(uint64_t uword) +{ + uint64_t bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL, + bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL, + bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL, + bit6_mask = 0xdaf69a46910ULL; + + /* clear the ecc bits */ + uword &= ~(0x7fULL << 0x2C); + uword |= qat_hal_parity_64bit(bit0_mask & uword) << 0x2C; + uword |= qat_hal_parity_64bit(bit1_mask & uword) << 0x2D; + uword |= qat_hal_parity_64bit(bit2_mask & uword) << 0x2E; + uword |= qat_hal_parity_64bit(bit3_mask & uword) << 0x2F; + uword |= qat_hal_parity_64bit(bit4_mask & uword) << 0x30; + uword |= qat_hal_parity_64bit(bit5_mask & uword) << 0x31; + uword |= qat_hal_parity_64bit(bit6_mask & uword) << 0x32; + return uword; +} + +void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int uaddr, + unsigned int words_num, uint64_t *uword) +{ + unsigned int ustore_addr; + unsigned int i; + + qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); + uaddr |= UA_ECS; + qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); + 
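
qat_hal_parity_64bit() above is a classic XOR fold: halving the word and XOR-ing six times leaves the overall parity in bit 0, which qat_hal_set_uword_ecc() then deposits into the seven ECC bits at positions 0x2C-0x32 of each microword. A self-contained check of the fold against GCC's builtin (illustrative only, not driver code):

#include <stdio.h>
#include <stdint.h>

static uint64_t parity64(uint64_t w)
{
        w ^= w >> 1;
        w ^= w >> 2;
        w ^= w >> 4;
        w ^= w >> 8;
        w ^= w >> 16;
        w ^= w >> 32;
        return w & 1;
}

int main(void)
{
        uint64_t v = 0x0F0400C0000ull;  /* sample microword from inst_4b[] */

        /* the XOR fold and the compiler builtin must agree */
        printf("fold=%llu builtin=%d\n",
               (unsigned long long)parity64(v), __builtin_parityll(v));
        return 0;
}
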
for (i = 0; i < words_num; i++) { + unsigned int uwrd_lo, uwrd_hi; + uint64_t tmp; + + tmp = qat_hal_set_uword_ecc(uword[i]); + uwrd_lo = (unsigned int)(tmp & 0xffffffff); + uwrd_hi = (unsigned int)(tmp >> 0x20); + qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo); + qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi); + } + qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr); +} + +static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int ctx_mask) +{ + unsigned int ctx; + + qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx); + ctx &= IGNORE_W1C_MASK; + ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF; + ctx |= (ctx_mask << CE_ENABLE_BITPOS); + qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx); +} + +static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) +{ + unsigned char ae; + unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX; + int times = MAX_RETRY_TIMES; + unsigned int csr_val = 0; + unsigned short reg; + unsigned int savctx = 0; + int ret = 0; + + for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) { + if (!(handle->hal_handle->ae_mask & (1 << ae))) + continue; + for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) { + qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS, + reg, 0); + qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS, + reg, 0); + } + qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val); + csr_val &= ~(1 << MMC_SHARE_CS_BITPOS); + qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val); + qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val); + csr_val &= IGNORE_W1C_MASK; + csr_val |= CE_NN_MODE; + qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val); + qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst), + (uint64_t *)inst); + qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT, + handle->hal_handle->upc_mask & + INIT_PC_VALUE); + qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx); + qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0); + qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY); + qat_hal_wr_indr_csr(handle, ae, ctx_mask, + CTX_SIG_EVENTS_INDIRECT, 0); + qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0); + qat_hal_enable_ctx(handle, ae, ctx_mask); + } + for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) { + if (!(handle->hal_handle->ae_mask & (1 << ae))) + continue; + /* wait for AE to finish */ + do { + ret = qat_hal_wait_cycles(handle, ae, 20, 1); + } while (ret && times--); + + if (!times) { + pr_err("QAT: clear GPR of AE %d failed", ae); + return -EINVAL; + } + qat_hal_disable_ctx(handle, ae, ctx_mask); + qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, + savctx & ACS_ACNO); + qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, + INIT_CTX_ENABLE_VALUE); + qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT, + handle->hal_handle->upc_mask & + INIT_PC_VALUE); + qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE); + qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE); + qat_hal_put_wakeup_event(handle, ae, ctx_mask, + INIT_WAKEUP_EVENTS_VALUE); + qat_hal_put_sig_event(handle, ae, ctx_mask, + INIT_SIG_EVENTS_VALUE); + } + return 0; +} + +#define ICP_DH895XCC_AE_OFFSET 0x20000 +#define ICP_DH895XCC_CAP_OFFSET (ICP_DH895XCC_AE_OFFSET + 0x10000) +#define LOCAL_TO_XFER_REG_OFFSET 0x800 +#define ICP_DH895XCC_EP_OFFSET 0x3a000 +#define ICP_DH895XCC_PMISC_BAR 1 +int qat_hal_init(struct adf_accel_dev *accel_dev) +{ + unsigned char ae = 0; + unsigned int csr_val = 0; + unsigned int max_en_ae_num = 0; + struct 
icp_qat_fw_loader_handle *handle = NULL; + struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_bar *bar = &pci_info->pci_bars[ADF_DH895XCC_PMISC_BAR]; + + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (!handle) + goto out_handle; + + handle->hal_cap_g_ctl_csr_addr_v = bar->virt_addr + + ICP_DH895XCC_CAP_OFFSET; + handle->hal_cap_ae_xfer_csr_addr_v = bar->virt_addr + + ICP_DH895XCC_AE_OFFSET; + handle->hal_ep_csr_addr_v = bar->virt_addr + ICP_DH895XCC_EP_OFFSET; + handle->hal_cap_ae_local_csr_addr_v = + handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET; + + handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL); + if (!handle->hal_handle) + goto out_hal_handle; + handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid; + handle->hal_handle->ae_mask = hw_data->ae_mask; + handle->hal_handle->slice_mask = hw_data->accel_mask; + /* create AE objects */ + handle->hal_handle->upc_mask = 0x1ffff; + handle->hal_handle->max_ustore = 0x4000; + for (ae = 0; ae < ICP_QAT_UCLO_MAX_AE; ae++) { + if (!(hw_data->ae_mask & (1 << ae))) + continue; + handle->hal_handle->aes[ae].free_addr = 0; + handle->hal_handle->aes[ae].free_size = + handle->hal_handle->max_ustore; + handle->hal_handle->aes[ae].ustore_size = + handle->hal_handle->max_ustore; + handle->hal_handle->aes[ae].live_ctx_mask = + ICP_QAT_UCLO_AE_ALL_CTX; + max_en_ae_num = ae; + } + handle->hal_handle->ae_max_num = max_en_ae_num; + /* take all AEs out of reset */ + if (qat_hal_clr_reset(handle)) { + pr_err("QAT: qat_hal_clr_reset error\n"); + goto out_err; + } + if (qat_hal_clear_gpr(handle)) + goto out_err; + /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */ + for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) { + if (!(hw_data->ae_mask & (1 << ae))) + continue; + qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val); + csr_val |= 0x1; + qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val); + } + accel_dev->fw_loader->fw_loader = handle; + return 0; + +out_err: + kfree(handle->hal_handle); +out_hal_handle: + kfree(handle); +out_handle: + return -EFAULT; +} + +void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle) +{ + if (!handle) + return; + kfree(handle->hal_handle); + kfree(handle); +} + +void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae, + unsigned int ctx_mask) +{ + qat_hal_put_wakeup_event(handle, ae, (~ctx_mask) & + ICP_QAT_UCLO_AE_ALL_CTX, 0x10000); + qat_hal_enable_ctx(handle, ae, ctx_mask); +} + +void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae, + unsigned int ctx_mask) +{ + qat_hal_disable_ctx(handle, ae, ctx_mask); +} + +void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int ctx_mask, unsigned int upc) +{ + qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT, + handle->hal_handle->upc_mask & upc); +} + +static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int uaddr, + unsigned int words_num, uint64_t *uword) +{ + unsigned int i, uwrd_lo, uwrd_hi; + unsigned int ustore_addr, misc_control; + + qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &misc_control); + qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, + misc_control & 0xfffffffb); + qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); + uaddr |= UA_ECS; + for (i = 0; i < words_num; i++) { + qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); + uaddr++; + 
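
The USTORE_DATA registers are only 32 bits wide, so qat_hal_wr_uwords() above splits each 64-bit microword into lower/upper halves, and the loop here reassembles them as (hi << 0x20) | lo. The round trip, sketched standalone:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t word = 0x0A0643C0000ull;
        uint32_t lo = (uint32_t)(word & 0xffffffff);
        uint32_t hi = (uint32_t)(word >> 0x20);
        uint64_t back = ((uint64_t)hi << 0x20) | lo;

        /* splitting into USTORE_DATA_LOWER/UPPER and recombining
         * must be lossless */
        printf("%s\n", back == word ? "round trip ok" : "mismatch");
        return 0;
}
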
qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER, &uwrd_lo); + qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER, &uwrd_hi); + uword[i] = uwrd_hi; + uword[i] = (uword[i] << 0x20) | uwrd_lo; + } + qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control); + qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr); +} + +static int qat_hal_count_bits(unsigned int word) +{ + int n = 0; + + while (word) { + n++; + word &= word - 1; + } + return n; +} + +void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned int uaddr, + unsigned int words_num, unsigned int *data) +{ + unsigned int i, ustore_addr; + + qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); + uaddr |= UA_ECS; + qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); + for (i = 0; i < words_num; i++) { + unsigned int uwrd_lo, uwrd_hi, tmp; + uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) | + ((data[i] & 0xff00) << 2) | + (0x3 << 8) | (data[i] & 0xff); + uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28); + uwrd_hi |= (qat_hal_count_bits(data[i] & 0xffff) & 0x1) << 8; + tmp = ((data[i] >> 0x10) & 0xffff); + uwrd_hi |= (qat_hal_count_bits(tmp) & 0x1) << 9; + qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo); + qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi); + } + qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr); +} + +#define MAX_EXEC_INST 100 +static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx, + uint64_t *micro_inst, unsigned int inst_num, + int code_off, unsigned int max_cycle, + unsigned int *endpc) +{ + uint64_t savuwords[MAX_EXEC_INST]; + unsigned int ind_lm_addr0, ind_lm_addr1; + unsigned int ind_lm_addr_byte0, ind_lm_addr_byte1; + unsigned int ind_cnt_sig; + unsigned int ind_sig, act_sig; + unsigned int csr_val = 0, newcsr_val; + unsigned int savctx; + unsigned int savcc, wakeup_events, savpc; + unsigned int ctxarb_ctl, ctx_enables; + + if ((inst_num > handle->hal_handle->max_ustore) || !micro_inst) { + pr_err("QAT: invalid instructs inst_num=%d, micro_inst=0x%p\n ", + inst_num, (unsigned int *)micro_inst); + return -EINVAL; + } + /* save current context */ + qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT, &ind_lm_addr0); + qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT, &ind_lm_addr1); + qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX, + &ind_lm_addr_byte0); + qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX, + &ind_lm_addr_byte1); + if (inst_num <= MAX_EXEC_INST) + qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords); + qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events); + qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, &savpc); + savpc = (savpc & handle->hal_handle->upc_mask) >> 0; + qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); + ctx_enables &= IGNORE_W1C_MASK; + qat_hal_rd_ae_csr(handle, ae, CC_ENABLE, &savcc); + qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx); + qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_ctl); + qat_hal_rd_indr_csr(handle, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT, + &ind_cnt_sig); + qat_hal_rd_indr_csr(handle, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &ind_sig); + qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, &act_sig); + /* execute micro codes */ + qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); + qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst); + qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, 0); + qat_hal_wr_ae_csr(handle, ae, 
ACTIVE_CTX_STATUS, ctx & ACS_ACNO); + if (code_off) + qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc & 0xffffdfff); + qat_hal_put_wakeup_event(handle, ae, (1 << ctx), XCWE_VOLUNTARY); + qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, 0); + qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0); + qat_hal_enable_ctx(handle, ae, (1 << ctx)); + /* wait for micro codes to finish */ + if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0) + return -EFAULT; + if (endpc) { + unsigned int ctx_status; + qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, + &ctx_status); + *endpc = ctx_status & handle->hal_handle->upc_mask; + } + /* retore to saved context */ + qat_hal_disable_ctx(handle, ae, (1 << ctx)); + if (inst_num <= MAX_EXEC_INST) + qat_hal_wr_uwords(handle, ae, 0, inst_num, savuwords); + qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events); + qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, + handle->hal_handle->upc_mask & savpc); + qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val); + newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS); + qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val); + qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc); + qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO); + qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_ctl); + qat_hal_wr_indr_csr(handle, ae, (1 << ctx), + LM_ADDR_0_INDIRECT, ind_lm_addr0); + qat_hal_wr_indr_csr(handle, ae, (1 << ctx), + LM_ADDR_1_INDIRECT, ind_lm_addr1); + qat_hal_wr_indr_csr(handle, ae, (1 << ctx), + INDIRECT_LM_ADDR_0_BYTE_INDEX, ind_lm_addr_byte0); + qat_hal_wr_indr_csr(handle, ae, (1 << ctx), + INDIRECT_LM_ADDR_1_BYTE_INDEX, ind_lm_addr_byte1); + qat_hal_wr_indr_csr(handle, ae, (1 << ctx), + FUTURE_COUNT_SIGNAL_INDIRECT, ind_cnt_sig); + qat_hal_wr_indr_csr(handle, ae, (1 << ctx), + CTX_SIG_EVENTS_INDIRECT, ind_sig); + qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig); + qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); + + return 0; +} + +static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx, + enum icp_qat_uof_regtype reg_type, + unsigned short reg_num, unsigned int *data) +{ + unsigned int savctx, uaddr, uwrd_lo, uwrd_hi; + unsigned int ctxarb_cntl, ustore_addr, ctx_enables; + unsigned short reg_addr; + int status = 0; + uint64_t insts, savuword; + + reg_addr = qat_hal_get_reg_addr(reg_type, reg_num); + if (reg_addr == BAD_REGADDR) { + pr_err("QAT: bad regaddr=0x%x\n", reg_addr); + return -EINVAL; + } + switch (reg_type) { + case ICP_GPA_REL: + insts = 0xA070000000ull | (reg_addr & 0x3ff); + break; + default: + insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10); + break; + } + qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx); + qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_cntl); + qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); + ctx_enables &= IGNORE_W1C_MASK; + if (ctx != (savctx & ACS_ACNO)) + qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, + ctx & ACS_ACNO); + qat_hal_get_uwords(handle, ae, 0, 1, &savuword); + qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); + qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); + uaddr = UA_ECS; + qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); + insts = qat_hal_set_uword_ecc(insts); + uwrd_lo = (unsigned int)(insts & 0xffffffff); + uwrd_hi = (unsigned int)(insts >> 0x20); + qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo); + qat_hal_wr_ae_csr(handle, ae, 
USTORE_DATA_UPPER, uwrd_hi); + qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); + /* delay for at least 8 cycles */ + qat_hal_wait_cycles(handle, ae, 0x8, 0); + /* + * read ALU output + * the instruction should have been executed + * prior to clearing the ECS in putUwords + */ + qat_hal_rd_ae_csr(handle, ae, ALU_OUT, data); + qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr); + qat_hal_wr_uwords(handle, ae, 0, 1, &savuword); + if (ctx != (savctx & ACS_ACNO)) + qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, + savctx & ACS_ACNO); + qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_cntl); + qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); + + return status; +} + +static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx, + enum icp_qat_uof_regtype reg_type, + unsigned short reg_num, unsigned int data) +{ + unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo; + uint64_t insts[] = { + 0x0F440000000ull, + 0x0F040000000ull, + 0x0F0000C0300ull, + 0x0E000010000ull + }; + const int num_inst = ARRAY_SIZE(insts), code_off = 1; + const int imm_w1 = 0, imm_w0 = 1; + + dest_addr = qat_hal_get_reg_addr(reg_type, reg_num); + if (dest_addr == BAD_REGADDR) { + pr_err("QAT: bad destAddr=0x%x\n", dest_addr); + return -EINVAL; + } + + data16lo = 0xffff & data; + data16hi = 0xffff & (data >> 0x10); + src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short) + (0xff & data16hi)); + src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short) + (0xff & data16lo)); + switch (reg_type) { + case ICP_GPA_REL: + insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) | + ((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff); + insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) | + ((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff); + break; + default: + insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) | + ((dest_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff); + + insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) | + ((dest_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff); + break; + } + + return qat_hal_exec_micro_inst(handle, ae, ctx, insts, num_inst, + code_off, num_inst * 0x5, NULL); +} + +int qat_hal_get_ins_num(void) +{ + return ARRAY_SIZE(inst_4b); +} + +static int qat_hal_concat_micro_code(uint64_t *micro_inst, + unsigned int inst_num, unsigned int size, + unsigned int addr, unsigned int *value) +{ + int i, val_indx; + unsigned int cur_value; + const uint64_t *inst_arr; + int fixup_offset; + int usize = 0; + int orig_num; + + orig_num = inst_num; + val_indx = 0; + cur_value = value[val_indx++]; + inst_arr = inst_4b; + usize = ARRAY_SIZE(inst_4b); + fixup_offset = inst_num; + for (i = 0; i < usize; i++) + micro_inst[inst_num++] = inst_arr[i]; + INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], (addr)); + fixup_offset++; + INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], 0); + fixup_offset++; + INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0)); + fixup_offset++; + INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0x10)); + + return inst_num - orig_num; +} + +static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx, + int *pfirst_exec, uint64_t *micro_inst, + unsigned int inst_num) +{ + int stat = 0; + unsigned int gpra0 = 0, gpra1 = 0, gpra2 = 0; + unsigned int gprb0 = 0, gprb1 = 0; + + if (*pfirst_exec) { + qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, &gpra0); + qat_hal_rd_rel_reg(handle, ae, ctx, 
ICP_GPA_REL, 0x1, &gpra1); + qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, &gpra2); + qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, &gprb0); + qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, &gprb1); + *pfirst_exec = 0; + } + stat = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, inst_num, 1, + inst_num * 0x5, NULL); + if (stat != 0) + return -EFAULT; + qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, gpra0); + qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, gpra1); + qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, gpra2); + qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, gprb0); + qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, gprb1); + + return 0; +} + +int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, + struct icp_qat_uof_batch_init *lm_init_header) +{ + struct icp_qat_uof_batch_init *plm_init; + uint64_t *micro_inst_arry; + int micro_inst_num; + int alloc_inst_size; + int first_exec = 1; + int stat = 0; + + plm_init = lm_init_header->next; + alloc_inst_size = lm_init_header->size; + if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore) + alloc_inst_size = handle->hal_handle->max_ustore; + micro_inst_arry = kmalloc(alloc_inst_size * sizeof(uint64_t), + GFP_KERNEL); + if (!micro_inst_arry) + return -ENOMEM; + micro_inst_num = 0; + while (plm_init) { + unsigned int addr, *value, size; + + ae = plm_init->ae; + addr = plm_init->addr; + value = plm_init->value; + size = plm_init->size; + micro_inst_num += qat_hal_concat_micro_code(micro_inst_arry, + micro_inst_num, + size, addr, value); + plm_init = plm_init->next; + } + /* exec micro codes */ + if (micro_inst_arry && (micro_inst_num > 0)) { + micro_inst_arry[micro_inst_num++] = 0x0E000010000ull; + stat = qat_hal_exec_micro_init_lm(handle, ae, 0, &first_exec, + micro_inst_arry, + micro_inst_num); + } + kfree(micro_inst_arry); + return stat; +} + +static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx, + enum icp_qat_uof_regtype reg_type, + unsigned short reg_num, unsigned int val) +{ + int status = 0; + unsigned int reg_addr; + unsigned int ctx_enables; + unsigned short mask; + unsigned short dr_offset = 0x10; + + status = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); + if (CE_INUSE_CONTEXTS & ctx_enables) { + if (ctx & 0x1) { + pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx); + return -EINVAL; + } + mask = 0x1f; + dr_offset = 0x20; + } else { + mask = 0x0f; + } + if (reg_num & ~mask) + return -EINVAL; + reg_addr = reg_num + (ctx << 0x5); + switch (reg_type) { + case ICP_SR_RD_REL: + case ICP_SR_REL: + SET_AE_XFER(handle, ae, reg_addr, val); + break; + case ICP_DR_RD_REL: + case ICP_DR_REL: + SET_AE_XFER(handle, ae, (reg_addr + dr_offset), val); + break; + default: + status = -EINVAL; + break; + } + return status; +} + +static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx, + enum icp_qat_uof_regtype reg_type, + unsigned short reg_num, unsigned int data) +{ + unsigned int gprval, ctx_enables; + unsigned short src_hiaddr, src_lowaddr, gpr_addr, xfr_addr, data16hi, + data16low; + unsigned short reg_mask; + int status = 0; + uint64_t micro_inst[] = { + 0x0F440000000ull, + 0x0F040000000ull, + 0x0A000000000ull, + 0x0F0000C0300ull, + 0x0E000010000ull + }; + const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1; + const unsigned short gprnum = 0, dly = num_inst * 0x5; + + qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, 
&ctx_enables); + if (CE_INUSE_CONTEXTS & ctx_enables) { + if (ctx & 0x1) { + pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx); + return -EINVAL; + } + reg_mask = (unsigned short)~0x1f; + } else { + reg_mask = (unsigned short)~0xf; + } + if (reg_num & reg_mask) + return -EINVAL; + xfr_addr = qat_hal_get_reg_addr(reg_type, reg_num); + if (xfr_addr == BAD_REGADDR) { + pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr); + return -EINVAL; + } + qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval); + gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum); + data16low = 0xffff & data; + data16hi = 0xffff & (data >> 0x10); + src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, + (unsigned short)(0xff & data16hi)); + src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, + (unsigned short)(0xff & data16low)); + micro_inst[0] = micro_inst[0x0] | ((data16hi >> 8) << 20) | + ((gpr_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff); + micro_inst[1] = micro_inst[0x1] | ((data16low >> 8) << 20) | + ((gpr_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff); + micro_inst[0x2] = micro_inst[0x2] | + ((xfr_addr & 0x3ff) << 20) | ((gpr_addr & 0x3ff) << 10); + status = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, num_inst, + code_off, dly, NULL); + qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, gprval); + return status; +} + +static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx, + unsigned short nn, unsigned int val) +{ + unsigned int ctx_enables; + int stat = 0; + + qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); + ctx_enables &= IGNORE_W1C_MASK; + qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE); + + stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, ICP_NEIGH_REL, nn, val); + qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); + return stat; +} + +static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle + *handle, unsigned char ae, + unsigned short absreg_num, + unsigned short *relreg, + unsigned char *ctx) +{ + unsigned int ctx_enables; + + qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); + if (ctx_enables & CE_INUSE_CONTEXTS) { + /* 4-ctx mode */ + *relreg = absreg_num & 0x1F; + *ctx = (absreg_num >> 0x4) & 0x6; + } else { + /* 8-ctx mode */ + *relreg = absreg_num & 0x0F; + *ctx = (absreg_num >> 0x4) & 0x7; + } + return 0; +} + +int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx_mask, + enum icp_qat_uof_regtype reg_type, + unsigned short reg_num, unsigned int regdata) +{ + int stat = 0; + unsigned short reg; + unsigned char ctx = 0; + enum icp_qat_uof_regtype type; + + if (reg_num >= ICP_QAT_UCLO_MAX_GPR_REG) + return -EINVAL; + + do { + if (ctx_mask == 0) { + qat_hal_convert_abs_to_rel(handle, ae, reg_num, ®, + &ctx); + type = reg_type - 1; + } else { + reg = reg_num; + type = reg_type; + if (!test_bit(ctx, (unsigned long *)&ctx_mask)) + continue; + } + stat = qat_hal_wr_rel_reg(handle, ae, ctx, type, reg, regdata); + if (stat) { + pr_err("QAT: write gpr fail\n"); + return -EINVAL; + } + } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX)); + + return 0; +} + +int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx_mask, + enum icp_qat_uof_regtype reg_type, + unsigned short reg_num, unsigned int regdata) +{ + int stat = 0; + unsigned short reg; + unsigned char ctx = 0; + enum icp_qat_uof_regtype type; + + if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG) + return -EINVAL; + + do { + if (ctx_mask == 0) { + 
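
qat_hal_convert_abs_to_rel(), defined a few functions up and called just below, turns an absolute register number into a (relative register, context) pair: 8-context mode keeps four register bits and a 3-bit context, while 4-context mode keeps five register bits and only even contexts. A hedged standalone restatement of that arithmetic (the four_ctx flag mirrors CE_INUSE_CONTEXTS in CTX_ENABLES):

#include <stdio.h>

static void abs_to_rel(int four_ctx, unsigned short abs_num,
                       unsigned short *rel, unsigned char *ctx)
{
        if (four_ctx) {
                *rel = abs_num & 0x1F;          /* 32 regs per context */
                *ctx = (abs_num >> 4) & 0x6;    /* even contexts only */
        } else {
                *rel = abs_num & 0x0F;          /* 16 regs per context */
                *ctx = (abs_num >> 4) & 0x7;
        }
}

int main(void)
{
        unsigned short rel;
        unsigned char ctx;

        abs_to_rel(0, 0x37, &rel, &ctx);        /* 8-ctx: reg 7 of ctx 3 */
        printf("rel=%u ctx=%u\n", rel, ctx);
        return 0;
}
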
qat_hal_convert_abs_to_rel(handle, ae, reg_num, ®, + &ctx); + type = reg_type - 3; + } else { + reg = reg_num; + type = reg_type; + if (!test_bit(ctx, (unsigned long *)&ctx_mask)) + continue; + } + stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, type, reg, + regdata); + if (stat) { + pr_err("QAT: write wr xfer fail\n"); + return -EINVAL; + } + } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX)); + + return 0; +} + +int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx_mask, + enum icp_qat_uof_regtype reg_type, + unsigned short reg_num, unsigned int regdata) +{ + int stat = 0; + unsigned short reg; + unsigned char ctx = 0; + enum icp_qat_uof_regtype type; + + if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG) + return -EINVAL; + + do { + if (ctx_mask == 0) { + qat_hal_convert_abs_to_rel(handle, ae, reg_num, ®, + &ctx); + type = reg_type - 3; + } else { + reg = reg_num; + type = reg_type; + if (!test_bit(ctx, (unsigned long *)&ctx_mask)) + continue; + } + stat = qat_hal_put_rel_rd_xfer(handle, ae, ctx, type, reg, + regdata); + if (stat) { + pr_err("QAT: write rd xfer fail\n"); + return -EINVAL; + } + } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX)); + + return 0; +} + +int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle, + unsigned char ae, unsigned char ctx_mask, + unsigned short reg_num, unsigned int regdata) +{ + int stat = 0; + unsigned char ctx; + + if (ctx_mask == 0) + return -EINVAL; + + for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { + if (!test_bit(ctx, (unsigned long *)&ctx_mask)) + continue; + stat = qat_hal_put_rel_nn(handle, ae, ctx, reg_num, regdata); + if (stat) { + pr_err("QAT: write neigh error\n"); + return -EINVAL; + } + } + + return 0; +} -- cgit v1.2.3 From 7afa232e76ced910a191a3f6669d8f48bcb46b3e Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Thu, 5 Jun 2014 13:44:20 -0700 Subject: crypto: qat - Intel(R) QAT DH895xcc accelerator This patch adds DH895xCC hardware specific code. It hooks to the common infrastructure and provides acceleration for crypto algorithms. Acked-by: John Griffin Reviewed-by: Bruce W. 
Allan Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_dh895xcc/Makefile | 8 + drivers/crypto/qat/qat_dh895xcc/adf_admin.c | 144 +++++++ .../crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 214 ++++++++++ .../crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h | 86 ++++ drivers/crypto/qat/qat_dh895xcc/adf_drv.c | 448 +++++++++++++++++++++ drivers/crypto/qat/qat_dh895xcc/adf_drv.h | 67 +++ drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c | 159 ++++++++ drivers/crypto/qat/qat_dh895xcc/adf_isr.c | 266 ++++++++++++ drivers/crypto/qat/qat_dh895xcc/qat_admin.c | 107 +++++ 9 files changed, 1499 insertions(+) create mode 100644 drivers/crypto/qat/qat_dh895xcc/Makefile create mode 100644 drivers/crypto/qat/qat_dh895xcc/adf_admin.c create mode 100644 drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c create mode 100644 drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h create mode 100644 drivers/crypto/qat/qat_dh895xcc/adf_drv.c create mode 100644 drivers/crypto/qat/qat_dh895xcc/adf_drv.h create mode 100644 drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c create mode 100644 drivers/crypto/qat/qat_dh895xcc/adf_isr.c create mode 100644 drivers/crypto/qat/qat_dh895xcc/qat_admin.c (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/qat_dh895xcc/Makefile b/drivers/crypto/qat/qat_dh895xcc/Makefile new file mode 100644 index 000000000000..8e4924d6a54b --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/Makefile @@ -0,0 +1,8 @@ +ccflags-y := -I$(CURDIR)/drivers/crypto/qat/qat_common +obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o +qat_dh895xcc-objs := adf_drv.o \ + adf_isr.o \ + adf_dh895xcc_hw_data.o \ + adf_hw_arbiter.o \ + qat_admin.o \ + adf_admin.o diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c new file mode 100644 index 000000000000..978d6c56639d --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c @@ -0,0 +1,144 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include +#include +#include +#include +#include +#include +#include +#include "adf_drv.h" +#include "adf_dh895xcc_hw_data.h" + +#define ADF_ADMINMSG_LEN 32 + +struct adf_admin_comms { + dma_addr_t phy_addr; + void *virt_addr; + void __iomem *mailbox_addr; + struct mutex lock; /* protects adf_admin_comms struct */ +}; + +int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, + uint32_t ae, void *in, void *out) +{ + struct adf_admin_comms *admin = accel_dev->admin; + int offset = ae * ADF_ADMINMSG_LEN * 2; + void __iomem *mailbox = admin->mailbox_addr; + int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE; + int times, received; + + mutex_lock(&admin->lock); + + if (ADF_CSR_RD(mailbox, mb_offset) == 1) { + mutex_unlock(&admin->lock); + return -EAGAIN; + } + + memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN); + ADF_CSR_WR(mailbox, mb_offset, 1); + received = 0; + for (times = 0; times < 50; times++) { + msleep(20); + if (ADF_CSR_RD(mailbox, mb_offset) == 0) { + received = 1; + break; + } + } + if (received) + memcpy(out, admin->virt_addr + offset + + ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN); + else + pr_err("QAT: Failed to send admin msg to accelerator\n"); + + mutex_unlock(&admin->lock); + return received ? 
0 : -EFAULT; +} + +int adf_init_admin_comms(struct adf_accel_dev *accel_dev) +{ + struct adf_admin_comms *admin; + struct adf_bar *pmisc = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR]; + void __iomem *csr = pmisc->virt_addr; + void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET; + uint64_t reg_val; + + admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL, + accel_dev->numa_node); + if (!admin) + return -ENOMEM; + admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, + &admin->phy_addr, GFP_KERNEL); + if (!admin->virt_addr) { + dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n"); + kfree(admin); + return -ENOMEM; + } + reg_val = (uint64_t)admin->phy_addr; + ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32); + ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val); + mutex_init(&admin->lock); + admin->mailbox_addr = mailbox; + accel_dev->admin = admin; + return 0; +} + +void adf_exit_admin_comms(struct adf_accel_dev *accel_dev) +{ + struct adf_admin_comms *admin = accel_dev->admin; + + if (!admin) + return; + + if (admin->virt_addr) + dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE, + admin->virt_addr, admin->phy_addr); + + mutex_destroy(&admin->lock); + kfree(admin); + accel_dev->admin = NULL; +} diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c new file mode 100644 index 000000000000..ef05825cc651 --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -0,0 +1,214 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
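
adf_put_admin_msg_sync() above implements a simple doorbell protocol: copy the request into the shared DMA buffer, write 1 to the per-AE mailbox CSR, and poll until firmware clears it back to 0 (up to 50 polls at 20 ms) before reading the response slot. A condensed sketch of that handshake with stubbed CSR accessors (all names invented, firmware behavior faked):

#include <stdio.h>

static int mailbox;                     /* stub for the mailbox CSR */

static int fw_poll(void)                /* pretend firmware consumed it */
{
        mailbox = 0;
        return mailbox;
}

static int put_msg_sync(void)
{
        int tries;

        if (mailbox == 1)               /* previous message still pending */
                return -1;

        mailbox = 1;                    /* ring the doorbell */
        for (tries = 0; tries < 50; tries++)
                if (fw_poll() == 0)
                        return 0;       /* firmware wrote the response */
        return -1;                      /* timed out */
}

int main(void)
{
        printf("%s\n", put_msg_sync() == 0 ? "acked" : "timeout");
        return 0;
}
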
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include +#include "adf_dh895xcc_hw_data.h" +#include "adf_drv.h" + +/* Worker thread to service arbiter mappings based on dev SKUs */ +static const uint32_t thrd_to_arb_map_sku4[] = { + 0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666, + 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222, + 0x00000000, 0x00000000, 0x00000000, 0x00000000 +}; + +static const uint32_t thrd_to_arb_map_sku6[] = { + 0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666, + 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222, + 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222 +}; + +static struct adf_hw_device_class dh895xcc_class = { + .name = ADF_DH895XCC_DEVICE_NAME, + .type = DEV_DH895XCC, + .instances = 0 +}; + +static uint32_t get_accel_mask(uint32_t fuse) +{ + return (~fuse) >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET & + ADF_DH895XCC_ACCELERATORS_MASK; +} + +static uint32_t get_ae_mask(uint32_t fuse) +{ + return (~fuse) & ADF_DH895XCC_ACCELENGINES_MASK; +} + +static uint32_t get_num_accels(struct adf_hw_device_data *self) +{ + uint32_t i, ctr = 0; + + if (!self || !self->accel_mask) + return 0; + + for (i = 0; i < ADF_DH895XCC_MAX_ACCELERATORS; i++) { + if (self->accel_mask & (1 << i)) + ctr++; + } + return ctr; +} + +static uint32_t get_num_aes(struct adf_hw_device_data *self) +{ + uint32_t i, ctr = 0; + + if (!self || !self->ae_mask) + return 0; + + for (i = 0; i < ADF_DH895XCC_MAX_ACCELENGINES; i++) { + if (self->ae_mask & (1 << i)) + ctr++; + } + return ctr; +} + +static uint32_t get_misc_bar_id(struct adf_hw_device_data *self) +{ + return ADF_DH895XCC_PMISC_BAR; +} + +static uint32_t get_etr_bar_id(struct adf_hw_device_data *self) +{ + return ADF_DH895XCC_ETR_BAR; +} + +static enum dev_sku_info get_sku(struct adf_hw_device_data *self) +{ + int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK) + >> ADF_DH895XCC_FUSECTL_SKU_SHIFT; + + switch (sku) { + case ADF_DH895XCC_FUSECTL_SKU_1: + return DEV_SKU_1; + case ADF_DH895XCC_FUSECTL_SKU_2: + return DEV_SKU_2; + case ADF_DH895XCC_FUSECTL_SKU_3: + return DEV_SKU_3; + case ADF_DH895XCC_FUSECTL_SKU_4: + return DEV_SKU_4; + default: + return DEV_SKU_UNKNOWN; + } + return DEV_SKU_UNKNOWN; +} + +void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, + uint32_t const **arb_map_config) +{ + switch (accel_dev->accel_pci_dev.sku) { + case DEV_SKU_1: + *arb_map_config = thrd_to_arb_map_sku4; + break; + + case DEV_SKU_2: + case DEV_SKU_4: + *arb_map_config = thrd_to_arb_map_sku6; + break; + default: + pr_err("QAT: The configuration doesn't match any SKU"); + *arb_map_config = NULL; + } +} + +static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR]; + void __iomem *csr = misc_bar->virt_addr; + unsigned int val, i; + + /* Enable Accel Engine error detection & correction */ + for (i = 0; i < hw_device->get_num_aes(hw_device); i++) { + val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_CTX_ENABLES(i)); + val |= 
ADF_DH895XCC_ENABLE_AE_ECC_ERR; + ADF_CSR_WR(csr, ADF_DH895XCC_AE_CTX_ENABLES(i), val); + val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_MISC_CONTROL(i)); + val |= ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR; + ADF_CSR_WR(csr, ADF_DH895XCC_AE_MISC_CONTROL(i), val); + } + + /* Enable shared memory error detection & correction */ + for (i = 0; i < hw_device->get_num_accels(hw_device); i++) { + val = ADF_CSR_RD(csr, ADF_DH895XCC_UERRSSMSH(i)); + val |= ADF_DH895XCC_ERRSSMSH_EN; + ADF_CSR_WR(csr, ADF_DH895XCC_UERRSSMSH(i), val); + val = ADF_CSR_RD(csr, ADF_DH895XCC_CERRSSMSH(i)); + val |= ADF_DH895XCC_ERRSSMSH_EN; + ADF_CSR_WR(csr, ADF_DH895XCC_CERRSSMSH(i), val); + } +} + +void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class = &dh895xcc_class; + hw_data->instance_id = dh895xcc_class.instances++; + hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS; + hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS; + hw_data->pci_dev_id = ADF_DH895XCC_PCI_DEVICE_ID; + hw_data->num_logical_accel = 1; + hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES; + hw_data->tx_rx_gap = ADF_DH895XCC_RX_RINGS_OFFSET; + hw_data->tx_rings_mask = ADF_DH895XCC_TX_RINGS_MASK; + hw_data->alloc_irq = adf_isr_resource_alloc; + hw_data->free_irq = adf_isr_resource_free; + hw_data->enable_error_correction = adf_enable_error_correction; + hw_data->hw_arb_ring_enable = adf_update_ring_arb_enable; + hw_data->hw_arb_ring_disable = adf_update_ring_arb_enable; + hw_data->get_accel_mask = get_accel_mask; + hw_data->get_ae_mask = get_ae_mask; + hw_data->get_num_accels = get_num_accels; + hw_data->get_num_aes = get_num_aes; + hw_data->get_etr_bar_id = get_etr_bar_id; + hw_data->get_misc_bar_id = get_misc_bar_id; + hw_data->get_sku = get_sku; + hw_data->fw_name = ADF_DH895XCC_FW; +} + +void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class->instances--; +} diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h new file mode 100644 index 000000000000..c5ce236aa979 --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h @@ -0,0 +1,86 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. 
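
get_accel_mask() and get_ae_mask() in adf_dh895xcc_hw_data.c above decode the fuse register by inverting it (a blown fuse bit means the unit is disabled), shifting accelerator bits down by 18, and masking to at most 6 accelerators or 12 engines; get_num_accels()/get_num_aes() then simply count the set bits. A standalone sketch with the offsets restated from this header and a hypothetical fuse value:

#include <stdio.h>
#include <stdint.h>

#define ACCELERATORS_REG_OFFSET 18      /* from the header */
#define ACCELERATORS_MASK       0x3F
#define ACCELENGINES_MASK       0xFFF

int main(void)
{
        uint32_t fuse = 0x00C00800;     /* hypothetical strap value */
        uint32_t accel = (~fuse >> ACCELERATORS_REG_OFFSET) &
                         ACCELERATORS_MASK;
        uint32_t aes = ~fuse & ACCELENGINES_MASK;

        /* a set fuse bit means the unit is fused off */
        printf("accel mask 0x%x (%d), ae mask 0x%x (%d)\n",
               accel, __builtin_popcount(accel),
               aes, __builtin_popcount(aes));
        return 0;
}
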
+ * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef ADF_DH895x_HW_DATA_H_ +#define ADF_DH895x_HW_DATA_H_ + +/* PCIe configuration space */ +#define ADF_DH895XCC_RX_RINGS_OFFSET 8 +#define ADF_DH895XCC_TX_RINGS_MASK 0xFF +#define ADF_DH895XCC_FUSECTL_OFFSET 0x40 +#define ADF_DH895XCC_FUSECTL_SKU_MASK 0x300000 +#define ADF_DH895XCC_FUSECTL_SKU_SHIFT 20 +#define ADF_DH895XCC_FUSECTL_SKU_1 0x0 +#define ADF_DH895XCC_FUSECTL_SKU_2 0x1 +#define ADF_DH895XCC_FUSECTL_SKU_3 0x2 +#define ADF_DH895XCC_FUSECTL_SKU_4 0x3 +#define ADF_DH895XCC_MAX_ACCELERATORS 6 +#define ADF_DH895XCC_MAX_ACCELENGINES 12 +#define ADF_DH895XCC_ACCELERATORS_REG_OFFSET 18 +#define ADF_DH895XCC_ACCELERATORS_MASK 0x3F +#define ADF_DH895XCC_ACCELENGINES_MASK 0xFFF +#define ADF_DH895XCC_LEGFUSE_OFFSET 0x4C +#define ADF_DH895XCC_ETR_MAX_BANKS 32 +#define ADF_DH895XCC_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) +#define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30) +#define ADF_DH895XCC_SMIA0_MASK 0xFFFF +#define ADF_DH895XCC_SMIA1_MASK 0x1 +/* Error detection and correction */ +#define ADF_DH895XCC_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818) +#define ADF_DH895XCC_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960) +#define ADF_DH895XCC_ENABLE_AE_ECC_ERR (1 << 28) +#define ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR (1 << 24 | 1 << 12) +#define ADF_DH895XCC_UERRSSMSH(i) (i * 0x4000 + 0x18) +#define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10) +#define ADF_DH895XCC_ERRSSMSH_EN (1 << 3) + +/* Admin Messages Registers */ +#define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574) +#define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578) +#define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970 +#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000 +#define ADF_DH895XCC_FW "qat_895xcc.bin" +#endif diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c new file mode 100644 index 000000000000..1da2f78b2058 --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c @@ -0,0 +1,448 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. 
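
The error-correction offsets in the header above follow a stride pattern: per-engine CSRs repeat every 0x1000 bytes (ADF_DH895XCC_AE_CTX_ENABLES(i) is i * 0x1000 + 0x20818) and the shared-memory error CSRs every 0x4000. A tiny restatement of that indexing (illustrative; the macros are paraphrased from the header, not copied):

#include <stdio.h>

/* mirrors ADF_DH895XCC_AE_CTX_ENABLES()/UERRSSMSH() from the header */
#define AE_CTX_ENABLES(i)       ((i) * 0x1000 + 0x20818)
#define UERRSSMSH(i)            ((i) * 0x4000 + 0x18)

int main(void)
{
        int i;

        for (i = 0; i < 3; i++)
                printf("AE%d ctx_enables @ 0x%x, uerrssmsh @ 0x%x\n",
                       i, AE_CTX_ENABLES(i), UERRSSMSH(i));
        return 0;
}
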
+ + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "adf_dh895xcc_hw_data.h" +#include "adf_drv.h" + +static const char adf_driver_name[] = ADF_DH895XCC_DEVICE_NAME; + +#define ADF_SYSTEM_DEVICE(device_id) \ + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + +static const struct pci_device_id adf_pci_tbl[] = { + ADF_SYSTEM_DEVICE(ADF_DH895XCC_PCI_DEVICE_ID), + {0,} +}; +MODULE_DEVICE_TABLE(pci, adf_pci_tbl); + +static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent); +static void adf_remove(struct pci_dev *dev); + +static struct pci_driver adf_driver = { + .id_table = adf_pci_tbl, + .name = adf_driver_name, + .probe = adf_probe, + .remove = adf_remove +}; + +static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) +{ + struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; + int i; + + adf_exit_admin_comms(accel_dev); + adf_exit_arb(accel_dev); + adf_cleanup_etr_data(accel_dev); + + for (i = 0; i < ADF_PCI_MAX_BARS; i++) { + struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; + + if (bar->virt_addr) + pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr); + } + + if (accel_dev->hw_device) { + switch (accel_dev->hw_device->pci_dev_id) { + case ADF_DH895XCC_PCI_DEVICE_ID: + adf_clean_hw_data_dh895xcc(accel_dev->hw_device); + break; + default: + break; + } + kfree(accel_dev->hw_device); + } + adf_cfg_dev_remove(accel_dev); + debugfs_remove(accel_dev->debugfs_dir); + adf_devmgr_rm_dev(accel_dev); + pci_release_regions(accel_pci_dev->pci_dev); + pci_disable_device(accel_pci_dev->pci_dev); + kfree(accel_dev); +} + +static uint8_t adf_get_dev_node_id(struct pci_dev *pdev) +{ + unsigned int bus_per_cpu = 0; + struct cpuinfo_x86 *c = &cpu_data(num_online_cpus() - 1); + + if (!c->phys_proc_id) + return 0; + + bus_per_cpu = 256 / (c->phys_proc_id + 1); + + if (bus_per_cpu != 0) + return 
pdev->bus->number / bus_per_cpu; + return 0; +} + +static int qat_dev_start(struct adf_accel_dev *accel_dev) +{ + int cpus = num_online_cpus(); + int banks = GET_MAX_BANKS(accel_dev); + int instances = min(cpus, banks); + char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; + int i; + unsigned long val; + + if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC)) + goto err; + if (adf_cfg_section_add(accel_dev, "Accelerator0")) + goto err; + for (i = 0; i < instances; i++) { + val = i; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, + i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i); + val = 128; + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 512; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 0; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 2; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 4; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 8; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 10; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 12; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = ADF_COALESCING_DEF_TIME; + snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); + if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0", + key, (void *)&val, ADF_DEC)) + goto err; + } + + val = i; + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + ADF_NUM_CY, (void *)&val, ADF_DEC)) + goto err; + + set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); + return adf_dev_start(accel_dev); +err: + dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n"); + return -EINVAL; +} + +static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct adf_accel_dev *accel_dev; + struct adf_accel_pci *accel_pci_dev; + struct adf_hw_device_data *hw_data; + void __iomem *pmisc_bar_addr = NULL; + char name[ADF_DEVICE_NAME_LENGTH]; + unsigned int i, bar_nr; + uint8_t node; + int ret; + + switch (ent->device) { + case ADF_DH895XCC_PCI_DEVICE_ID: + break; + default: + dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device); + return -ENODEV; + } + + node = adf_get_dev_node_id(pdev); + accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, node); + if (!accel_dev) + return -ENOMEM; + + accel_dev->numa_node = node; + INIT_LIST_HEAD(&accel_dev->crypto_list); + + /* Add accel device to accel table. 
+ * This should be called before adf_cleanup_accel is called */ + if (adf_devmgr_add_dev(accel_dev)) { + dev_err(&pdev->dev, "Failed to add new accelerator device.\n"); + kfree(accel_dev); + return -EFAULT; + } + + accel_dev->owner = THIS_MODULE; + /* Allocate and configure device configuration structure */ + hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, node); + if (!hw_data) { + ret = -ENOMEM; + goto out_err; + } + + accel_dev->hw_device = hw_data; + switch (ent->device) { + case ADF_DH895XCC_PCI_DEVICE_ID: + adf_init_hw_data_dh895xcc(accel_dev->hw_device); + break; + default: + return -ENODEV; + } + accel_pci_dev = &accel_dev->accel_pci_dev; + pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); + pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET, + &hw_data->fuses); + + /* Get Accelerators and Accelerators Engines masks */ + hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses); + hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses); + accel_pci_dev->sku = hw_data->get_sku(hw_data); + accel_pci_dev->pci_dev = pdev; + /* If the device has no acceleration engines then ignore it. */ + if (!hw_data->accel_mask || !hw_data->ae_mask || + ((~hw_data->ae_mask) & 0x01)) { + dev_err(&pdev->dev, "No acceleration units found"); + ret = -EFAULT; + goto out_err; + } + + /* Create dev top level debugfs entry */ + snprintf(name, sizeof(name), "%s%s_dev%d", ADF_DEVICE_NAME_PREFIX, + hw_data->dev_class->name, hw_data->instance_id); + accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); + if (!accel_dev->debugfs_dir) { + dev_err(&pdev->dev, "Could not create debugfs dir\n"); + ret = -EINVAL; + goto out_err; + } + + /* Create device configuration table */ + ret = adf_cfg_dev_add(accel_dev); + if (ret) + goto out_err; + + /* enable PCI device */ + if (pci_enable_device(pdev)) { + ret = -EFAULT; + goto out_err; + } + + /* set dma identifier */ + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { + dev_err(&pdev->dev, "No usable DMA configuration\n"); + ret = -EFAULT; + goto out_err; + } else { + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + } + + } else { + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + } + + if (pci_request_regions(pdev, adf_driver_name)) { + ret = -EFAULT; + goto out_err; + } + + /* Read accelerator capabilities mask */ + pci_read_config_dword(pdev, ADF_DH895XCC_LEGFUSE_OFFSET, + &hw_data->accel_capabilities_mask); + + /* Find and map all the device's BARS */ + for (i = 0; i < ADF_PCI_MAX_BARS; i++) { + struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; + + bar_nr = i * 2; + bar->base_addr = pci_resource_start(pdev, bar_nr); + if (!bar->base_addr) + break; + bar->size = pci_resource_len(pdev, bar_nr); + bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0); + if (!bar->virt_addr) { + dev_err(&pdev->dev, "Failed to map BAR %d\n", i); + ret = -EFAULT; + goto out_err; + } + if (i == ADF_DH895XCC_PMISC_BAR) + pmisc_bar_addr = bar->virt_addr; + } + pci_set_master(pdev); + + if (adf_enable_aer(accel_dev, &adf_driver)) { + dev_err(&pdev->dev, "Failed to enable aer\n"); + ret = -EFAULT; + goto out_err; + } + + if (adf_init_etr_data(accel_dev)) { + dev_err(&pdev->dev, "Failed initialize etr\n"); + ret = -EFAULT; + goto out_err; + } + + if (adf_init_admin_comms(accel_dev)) { + dev_err(&pdev->dev, "Failed initialize admin comms\n"); + ret = -EFAULT; + goto out_err; + } + + if (adf_init_arb(accel_dev)) { + dev_err(&pdev->dev, "Failed initialize hw arbiter\n"); + ret = -EFAULT; + goto 
out_err; + } + if (pci_save_state(pdev)) { + dev_err(&pdev->dev, "Failed to save pci state\n"); + ret = -ENOMEM; + goto out_err; + } + + /* Enable bundle and misc interrupts */ + ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET, + ADF_DH895XCC_SMIA0_MASK); + ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET, + ADF_DH895XCC_SMIA1_MASK); + + ret = qat_dev_start(accel_dev); + if (ret) { + adf_dev_stop(accel_dev); + goto out_err; + } + + return 0; +out_err: + adf_cleanup_accel(accel_dev); + return ret; +} + +static void __exit adf_remove(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + if (!accel_dev) { + pr_err("QAT: Driver removal failed\n"); + return; + } + if (adf_dev_stop(accel_dev)) + dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n"); + adf_disable_aer(accel_dev); + adf_cleanup_accel(accel_dev); +} + +static int __init adfdrv_init(void) +{ + request_module("intel_qat"); + if (qat_admin_register()) + return -EFAULT; + + if (pci_register_driver(&adf_driver)) { + pr_err("QAT: Driver initialization failed\n"); + return -EFAULT; + } + return 0; +} + +static void __exit adfdrv_release(void) +{ + pci_unregister_driver(&adf_driver); + qat_admin_unregister(); +} + +module_init(adfdrv_init); +module_exit(adfdrv_release); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Intel"); +MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.h b/drivers/crypto/qat/qat_dh895xcc/adf_drv.h new file mode 100644 index 000000000000..a2fbb6ce75cd --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.h @@ -0,0 +1,67 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef ADF_DH895x_DRV_H_ +#define ADF_DH895x_DRV_H_ +#include +#include + +void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data); +void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data); +int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev); +void adf_isr_resource_free(struct adf_accel_dev *accel_dev); +void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring); +void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, + uint32_t const **arb_map_config); +int adf_init_admin_comms(struct adf_accel_dev *accel_dev); +void adf_exit_admin_comms(struct adf_accel_dev *accel_dev); +int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, + uint32_t ae, void *in, void *out); +int qat_admin_register(void); +int qat_admin_unregister(void); +int adf_init_arb(struct adf_accel_dev *accel_dev); +void adf_exit_arb(struct adf_accel_dev *accel_dev); +#endif diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c b/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c new file mode 100644 index 000000000000..36ffa70211c9 --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c @@ -0,0 +1,159 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include +#include +#include "adf_drv.h" + +#define ADF_ARB_NUM 4 +#define ADF_ARB_REQ_RING_NUM 8 +#define ADF_ARB_REG_SIZE 0x4 +#define ADF_ARB_WTR_SIZE 0x20 +#define ADF_ARB_OFFSET 0x30000 +#define ADF_ARB_REG_SLOT 0x1000 +#define ADF_ARB_WTR_OFFSET 0x010 +#define ADF_ARB_RO_EN_OFFSET 0x090 +#define ADF_ARB_WQCFG_OFFSET 0x100 +#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180 +#define ADF_ARB_WRK_2_SER_MAP 10 +#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C + +#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \ + ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \ + (ADF_ARB_REG_SLOT * index), value); + +#define WRITE_CSR_ARB_RESPORDERING(csr_addr, index, value) \ + ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \ + ADF_ARB_RO_EN_OFFSET) + (ADF_ARB_REG_SIZE * index), value); + +#define WRITE_CSR_ARB_WEIGHT(csr_addr, arb, index, value) \ + ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \ + ADF_ARB_WTR_OFFSET) + (ADF_ARB_WTR_SIZE * arb) + \ + (ADF_ARB_REG_SIZE * index), value); + +#define WRITE_CSR_ARB_SARCONFIG(csr_addr, index, value) \ + ADF_CSR_WR(csr_addr, ADF_ARB_OFFSET + \ + (ADF_ARB_REG_SIZE * index), value); + +#define WRITE_CSR_ARB_WRK_2_SER_MAP(csr_addr, index, value) \ + ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \ + ADF_ARB_WRK_2_SER_MAP_OFFSET) + \ + (ADF_ARB_REG_SIZE * index), value); + +#define WRITE_CSR_ARB_WQCFG(csr_addr, index, value) \ + ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \ + ADF_ARB_WQCFG_OFFSET) + (ADF_ARB_REG_SIZE * index), value); + +int adf_init_arb(struct adf_accel_dev *accel_dev) +{ + void __iomem *csr = accel_dev->transport->banks[0].csr_addr; + uint32_t arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1; + uint32_t arb, i; + const uint32_t *thd_2_arb_cfg; + + /* Service arb configured for 32 bytes responses and + * ring flow control check enabled. 
*/ + for (arb = 0; arb < ADF_ARB_NUM; arb++) + WRITE_CSR_ARB_SARCONFIG(csr, arb, arb_cfg); + + /* Setup service weighting */ + for (arb = 0; arb < ADF_ARB_NUM; arb++) + for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++) + WRITE_CSR_ARB_WEIGHT(csr, arb, i, 0xFFFFFFFF); + + /* Setup ring response ordering */ + for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++) + WRITE_CSR_ARB_RESPORDERING(csr, i, 0xFFFFFFFF); + + /* Setup worker queue registers */ + for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++) + WRITE_CSR_ARB_WQCFG(csr, i, i); + + /* Map worker threads to service arbiters */ + adf_get_arbiter_mapping(accel_dev, &thd_2_arb_cfg); + + if (!thd_2_arb_cfg) + return -EFAULT; + + for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++) + WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, *(thd_2_arb_cfg + i)); + + return 0; +} + +void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring) +{ + WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr, + ring->bank->bank_number, + ring->bank->ring_mask & 0xFF); +} + +void adf_exit_arb(struct adf_accel_dev *accel_dev) +{ + void __iomem *csr; + unsigned int i; + + if (!accel_dev->transport) + return; + + csr = accel_dev->transport->banks[0].csr_addr; + + /* Reset arbiter configuration */ + for (i = 0; i < ADF_ARB_NUM; i++) + WRITE_CSR_ARB_SARCONFIG(csr, i, 0); + + /* Shutdown work queue */ + for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++) + WRITE_CSR_ARB_WQCFG(csr, i, 0); + + /* Unmap worker threads to service arbiters */ + for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++) + WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, 0); + + /* Disable arbitration on all rings */ + for (i = 0; i < GET_MAX_BANKS(accel_dev); i++) + WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0); +} diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c new file mode 100644 index 000000000000..77e2d3a573f4 --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c @@ -0,0 +1,266 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "adf_drv.h" + +static int adf_enable_msix(struct adf_accel_dev *accel_dev) +{ + struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + uint32_t msix_num_entries = hw_data->num_banks + 1; + int i; + + for (i = 0; i < msix_num_entries; i++) + pci_dev_info->msix_entries.entries[i].entry = i; + + if (pci_enable_msix(pci_dev_info->pci_dev, + pci_dev_info->msix_entries.entries, + msix_num_entries)) { + pr_err("QAT: Failed to enable MSIX IRQ\n"); + return -EFAULT; + } + return 0; +} + +static void adf_disable_msix(struct adf_accel_pci *pci_dev_info) +{ + pci_disable_msix(pci_dev_info->pci_dev); +} + +static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr) +{ + struct adf_etr_bank_data *bank = bank_ptr; + + WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, 0); + tasklet_hi_schedule(&bank->resp_hanlder); + return IRQ_HANDLED; +} + +static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr) +{ + struct adf_accel_dev *accel_dev = dev_ptr; + + pr_info("QAT: qat_dev%d spurious AE interrupt\n", accel_dev->accel_id); + return IRQ_HANDLED; +} + +static int adf_request_irqs(struct adf_accel_dev *accel_dev) +{ + struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct msix_entry *msixe = pci_dev_info->msix_entries.entries; + struct adf_etr_data *etr_data = accel_dev->transport; + int ret, i; + char *name; + + /* Request msix irq for all banks */ + for (i = 0; i < hw_data->num_banks; i++) { + struct adf_etr_bank_data *bank = &etr_data->banks[i]; + unsigned int cpu, cpus = num_online_cpus(); + + name = *(pci_dev_info->msix_entries.names + i); + snprintf(name, ADF_MAX_MSIX_VECTOR_NAME, + "qat%d-bundle%d", accel_dev->accel_id, i); + ret = request_irq(msixe[i].vector, + adf_msix_isr_bundle, 0, name, bank); + if (ret) { + pr_err("QAT: failed to enable irq %d for %s\n", + msixe[i].vector, name); + return ret; + } + + cpu = ((accel_dev->accel_id * hw_data->num_banks) + i) % cpus; + irq_set_affinity_hint(msixe[i].vector, get_cpu_mask(cpu)); + } + + /* Request msix irq for AE */ + name = *(pci_dev_info->msix_entries.names + i); + snprintf(name, ADF_MAX_MSIX_VECTOR_NAME, + "qat%d-ae-cluster", accel_dev->accel_id); + ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev); + if (ret) { + pr_err("QAT: failed to enable irq %d, for %s\n", + msixe[i].vector, name); + return ret; + } + return ret; +} + +static void adf_free_irqs(struct adf_accel_dev *accel_dev) +{ + struct adf_accel_pci *pci_dev_info = 
&accel_dev->accel_pci_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct msix_entry *msixe = pci_dev_info->msix_entries.entries; + struct adf_etr_data *etr_data = accel_dev->transport; + int i; + + for (i = 0; i < hw_data->num_banks; i++) { + irq_set_affinity_hint(msixe[i].vector, NULL); + free_irq(msixe[i].vector, &etr_data->banks[i]); + } + irq_set_affinity_hint(msixe[i].vector, NULL); + free_irq(msixe[i].vector, accel_dev); +} + +static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev) +{ + int i; + char **names; + struct msix_entry *entries; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + uint32_t msix_num_entries = hw_data->num_banks + 1; + + entries = kzalloc_node(msix_num_entries * sizeof(*entries), + GFP_KERNEL, accel_dev->numa_node); + if (!entries) + return -ENOMEM; + + names = kzalloc(msix_num_entries * sizeof(char *), GFP_KERNEL); + if (!names) { + kfree(entries); + return -ENOMEM; + } + for (i = 0; i < msix_num_entries; i++) { + *(names + i) = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL); + if (!(*(names + i))) + goto err; + } + accel_dev->accel_pci_dev.msix_entries.entries = entries; + accel_dev->accel_pci_dev.msix_entries.names = names; + return 0; +err: + for (i = 0; i < msix_num_entries; i++) { + if (*(names + i)) + kfree(*(names + i)); + } + kfree(entries); + kfree(names); + return -ENOMEM; +} + +static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + uint32_t msix_num_entries = hw_data->num_banks + 1; + char **names = accel_dev->accel_pci_dev.msix_entries.names; + int i; + + kfree(accel_dev->accel_pci_dev.msix_entries.entries); + for (i = 0; i < msix_num_entries; i++) { + if (*(names + i)) + kfree(*(names + i)); + } + kfree(names); +} + +static int adf_setup_bh(struct adf_accel_dev *accel_dev) +{ + struct adf_etr_data *priv_data = accel_dev->transport; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + int i; + + for (i = 0; i < hw_data->num_banks; i++) + tasklet_init(&priv_data->banks[i].resp_hanlder, + adf_response_handler, + (unsigned long)&priv_data->banks[i]); + return 0; +} + +static void adf_cleanup_bh(struct adf_accel_dev *accel_dev) +{ + struct adf_etr_data *priv_data = accel_dev->transport; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + int i; + + for (i = 0; i < hw_data->num_banks; i++) { + tasklet_disable(&priv_data->banks[i].resp_hanlder); + tasklet_kill(&priv_data->banks[i].resp_hanlder); + } +} + +void adf_isr_resource_free(struct adf_accel_dev *accel_dev) +{ + adf_free_irqs(accel_dev); + adf_cleanup_bh(accel_dev); + adf_disable_msix(&accel_dev->accel_pci_dev); + adf_isr_free_msix_entry_table(accel_dev); +} + +int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev) +{ + int ret; + + ret = adf_isr_alloc_msix_entry_table(accel_dev); + if (ret) + return ret; + if (adf_enable_msix(accel_dev)) + goto err_out; + + if (adf_setup_bh(accel_dev)) + goto err_out; + + if (adf_request_irqs(accel_dev)) + goto err_out; + + return 0; +err_out: + adf_isr_resource_free(accel_dev); + return -EFAULT; +} diff --git a/drivers/crypto/qat/qat_dh895xcc/qat_admin.c b/drivers/crypto/qat/qat_dh895xcc/qat_admin.c new file mode 100644 index 000000000000..55b7a8e48bad --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/qat_admin.c @@ -0,0 +1,107 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. 
+ + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#include +#include +#include +#include "adf_drv.h" + +static struct service_hndl qat_admin; + +static int qat_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd) +{ + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + struct icp_qat_fw_init_admin_req req; + struct icp_qat_fw_init_admin_resp resp; + int i; + + memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req)); + req.init_admin_cmd_id = cmd; + for (i = 0; i < hw_device->get_num_aes(hw_device); i++) { + memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp)); + if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) || + resp.init_resp_hdr.status) + return -EFAULT; + } + return 0; +} + +static int qat_admin_start(struct adf_accel_dev *accel_dev) +{ + return qat_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME); +} + +static int qat_admin_event_handler(struct adf_accel_dev *accel_dev, + enum adf_event event) +{ + int ret; + + switch (event) { + case ADF_EVENT_START: + ret = qat_admin_start(accel_dev); + break; + case ADF_EVENT_STOP: + case ADF_EVENT_INIT: + case ADF_EVENT_SHUTDOWN: + default: + ret = 0; + } + return ret; +} + +int qat_admin_register(void) +{ + memset(&qat_admin, 0, sizeof(struct service_hndl)); + qat_admin.event_hld = qat_admin_event_handler; + qat_admin.name = "qat_admin"; + qat_admin.admin = 1; + return adf_service_register(&qat_admin); +} + +int qat_admin_unregister(void) +{ + return adf_service_unregister(&qat_admin); +} -- cgit v1.2.3 From cea4001ae1f80270a30031c6de139313e4dda213 Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Thu, 5 Jun 2014 13:44:39 -0700 Subject: crypto: qat - Update to makefiles Update to makefiles etc. Don't update the firmware/Makefile yet since there is no FW binary in the crypto repo yet. This will be added later. v3 - removed change to ./firmware/Makefile Reviewed-by: Bruce W. Allan Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu --- Documentation/ioctl/ioctl-number.txt | 1 + MAINTAINERS | 6 ++++++ drivers/crypto/Kconfig | 1 + drivers/crypto/Makefile | 1 + 4 files changed, 9 insertions(+) (limited to 'drivers/crypto') diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt index d7e43fa88575..7e240a7c9ab1 100644 --- a/Documentation/ioctl/ioctl-number.txt +++ b/Documentation/ioctl/ioctl-number.txt @@ -197,6 +197,7 @@ Code Seq#(hex) Include File Comments 'a' all linux/atm*.h, linux/sonet.h ATM on linux +'a' 00-0F drivers/crypto/qat/qat_common/adf_cfg_common.h conflict! qat driver 'b' 00-FF conflict! bit3 vme host bridge 'c' all linux/cm4000_cs.h conflict! diff --git a/MAINTAINERS b/MAINTAINERS index 134483f206e4..16d59e4030b7 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7212,6 +7212,12 @@ M: Robert Jarzmik L: rtc-linux@googlegroups.com S: Maintained +QAT DRIVER +M: Tadeusz Struk +L: qat-linux@intel.com +S: Supported +F: drivers/crypto/qat/ + QIB DRIVER M: Mike Marciniszyn L: linux-rdma@vger.kernel.org diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 30756bd1a8c4..107822f43fc3 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -418,4 +418,5 @@ config CRYPTO_DEV_MXS_DCP To compile this driver as a module, choose M here: the module will be called mxs-dcp. 
+source "drivers/crypto/qat/Kconfig" endif # CRYPTO_HW diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index 482f090d16d0..f996ed72569c 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -23,3 +23,4 @@ obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/ +obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/ -- cgit v1.2.3 From be513f44321f83b01716767550a2a79d2fbe2528 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 9 Jun 2014 18:19:41 +0300 Subject: crypto: caam - remove duplicate FIFOST_CONT_MASK define The FIFOST_CONT_MASK define is cut and pasted twice so we can delete the second instance. Signed-off-by: Dan Carpenter Acked-by: Kim Phillips Acked-by: Marek Vasut Signed-off-by: Herbert Xu --- drivers/crypto/caam/desc.h | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h index 7e4500f18df6..d397ff9d56fd 100644 --- a/drivers/crypto/caam/desc.h +++ b/drivers/crypto/caam/desc.h @@ -321,7 +321,6 @@ struct sec4_sg_entry { /* Continue - Not the last FIFO store to come */ #define FIFOST_CONT_SHIFT 23 #define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT) -#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT) /* * Extended Length - use 32-bit extended length that -- cgit v1.2.3 From e60b244281cfe03ddd7c5c15c4e6b6d6316bb530 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 23 Jun 2014 21:37:53 +0800 Subject: crypto: qat - Fix build problem with O= qat adds -I to the ccflags. Unfortunately it uses CURDIR which breaks when make is invoked with O=. This patch replaces CURDIR with $(src) which should work with/without O=. Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_dh895xcc/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/qat_dh895xcc/Makefile b/drivers/crypto/qat/qat_dh895xcc/Makefile index 8e4924d6a54b..25171c557043 100644 --- a/drivers/crypto/qat/qat_dh895xcc/Makefile +++ b/drivers/crypto/qat/qat_dh895xcc/Makefile @@ -1,4 +1,4 @@ -ccflags-y := -I$(CURDIR)/drivers/crypto/qat/qat_common +ccflags-y := -I$(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o qat_dh895xcc-objs := adf_drv.o \ adf_isr.o \ -- cgit v1.2.3 From eb1139cd437afadc63f58159c111e3f166bddb51 Mon Sep 17 00:00:00 2001 From: Ruchika Gupta Date: Mon, 23 Jun 2014 15:08:28 +0530 Subject: crypto: caam - Correct definition of registers in memory map Some registers like SECVID, CHAVID, CHA Revision Number, CTPR were defined as 64 bit resgisters. The IP provides a DWT bit(Double word Transpose) to transpose the two words when a double word register is accessed. However setting this bit would also affect the operation of job descriptors as well as other registers which are truly double word in nature. So, for the IP to work correctly on big-endian as well as little-endian SoC's, change is required to access all 32 bit registers as 32 bit quantities. 
Signed-off-by: Ruchika Gupta Signed-off-by: Herbert Xu --- drivers/crypto/caam/ctrl.c | 14 +++++---- drivers/crypto/caam/regs.h | 71 +++++++++++++++++++++++++--------------------- 2 files changed, 46 insertions(+), 39 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 34ffc35e3a09..066a4d4b62bf 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -378,7 +378,7 @@ static int caam_probe(struct platform_device *pdev) #ifdef CONFIG_DEBUG_FS struct caam_perfmon *perfmon; #endif - u64 cha_vid; + u32 cha_vid_ls; ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(struct caam_drv_private), GFP_KERNEL); @@ -456,8 +456,9 @@ static int caam_probe(struct platform_device *pdev) } /* Check to see if QI present. If so, enable */ - ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) & - CTPR_QI_MASK); + ctrlpriv->qi_present = + !!(rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms) & + CTPR_MS_QI_MASK); if (ctrlpriv->qi_present) { ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi; /* This is all that's required to physically enable QI */ @@ -471,13 +472,13 @@ static int caam_probe(struct platform_device *pdev) return -ENOMEM; } - cha_vid = rd_reg64(&topregs->ctrl.perfmon.cha_id); + cha_vid_ls = rd_reg32(&topregs->ctrl.perfmon.cha_id_ls); /* * If SEC has RNG version >= 4 and RNG state handle has not been * already instantiated, do RNG instantiation */ - if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4) { + if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) { ctrlpriv->rng4_sh_init = rd_reg32(&topregs->ctrl.r4tst[0].rdsta); /* @@ -531,7 +532,8 @@ static int caam_probe(struct platform_device *pdev) /* NOTE: RTIC detection ought to go here, around Si time */ - caam_id = rd_reg64(&topregs->ctrl.perfmon.caam_id); + caam_id = (u64)rd_reg32(&topregs->ctrl.perfmon.caam_id_ms) << 32 | + (u64)rd_reg32(&topregs->ctrl.perfmon.caam_id_ls); /* Report "alive" for developer to see */ dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id, diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index cbde8b95a6f8..7bb898d2e699 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -114,45 +114,45 @@ struct jr_outentry { */ /* Number of DECOs */ -#define CHA_NUM_DECONUM_SHIFT 56 -#define CHA_NUM_DECONUM_MASK (0xfull << CHA_NUM_DECONUM_SHIFT) +#define CHA_NUM_MS_DECONUM_SHIFT 24 +#define CHA_NUM_MS_DECONUM_MASK (0xfull << CHA_NUM_MS_DECONUM_SHIFT) /* CHA Version IDs */ -#define CHA_ID_AES_SHIFT 0 -#define CHA_ID_AES_MASK (0xfull << CHA_ID_AES_SHIFT) +#define CHA_ID_LS_AES_SHIFT 0 +#define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT) -#define CHA_ID_DES_SHIFT 4 -#define CHA_ID_DES_MASK (0xfull << CHA_ID_DES_SHIFT) +#define CHA_ID_LS_DES_SHIFT 4 +#define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT) -#define CHA_ID_ARC4_SHIFT 8 -#define CHA_ID_ARC4_MASK (0xfull << CHA_ID_ARC4_SHIFT) +#define CHA_ID_LS_ARC4_SHIFT 8 +#define CHA_ID_LS_ARC4_MASK (0xfull << CHA_ID_LS_ARC4_SHIFT) -#define CHA_ID_MD_SHIFT 12 -#define CHA_ID_MD_MASK (0xfull << CHA_ID_MD_SHIFT) +#define CHA_ID_LS_MD_SHIFT 12 +#define CHA_ID_LS_MD_MASK (0xfull << CHA_ID_LS_MD_SHIFT) -#define CHA_ID_RNG_SHIFT 16 -#define CHA_ID_RNG_MASK (0xfull << CHA_ID_RNG_SHIFT) +#define CHA_ID_LS_RNG_SHIFT 16 +#define CHA_ID_LS_RNG_MASK (0xfull << CHA_ID_LS_RNG_SHIFT) -#define CHA_ID_SNW8_SHIFT 20 -#define CHA_ID_SNW8_MASK (0xfull << CHA_ID_SNW8_SHIFT) +#define CHA_ID_LS_SNW8_SHIFT 20 +#define 
CHA_ID_LS_SNW8_MASK (0xfull << CHA_ID_LS_SNW8_SHIFT) -#define CHA_ID_KAS_SHIFT 24 -#define CHA_ID_KAS_MASK (0xfull << CHA_ID_KAS_SHIFT) +#define CHA_ID_LS_KAS_SHIFT 24 +#define CHA_ID_LS_KAS_MASK (0xfull << CHA_ID_LS_KAS_SHIFT) -#define CHA_ID_PK_SHIFT 28 -#define CHA_ID_PK_MASK (0xfull << CHA_ID_PK_SHIFT) +#define CHA_ID_LS_PK_SHIFT 28 +#define CHA_ID_LS_PK_MASK (0xfull << CHA_ID_LS_PK_SHIFT) -#define CHA_ID_CRC_SHIFT 32 -#define CHA_ID_CRC_MASK (0xfull << CHA_ID_CRC_SHIFT) +#define CHA_ID_MS_CRC_SHIFT 0 +#define CHA_ID_MS_CRC_MASK (0xfull << CHA_ID_MS_CRC_SHIFT) -#define CHA_ID_SNW9_SHIFT 36 -#define CHA_ID_SNW9_MASK (0xfull << CHA_ID_SNW9_SHIFT) +#define CHA_ID_MS_SNW9_SHIFT 4 +#define CHA_ID_MS_SNW9_MASK (0xfull << CHA_ID_MS_SNW9_SHIFT) -#define CHA_ID_DECO_SHIFT 56 -#define CHA_ID_DECO_MASK (0xfull << CHA_ID_DECO_SHIFT) +#define CHA_ID_MS_DECO_SHIFT 24 +#define CHA_ID_MS_DECO_MASK (0xfull << CHA_ID_MS_DECO_SHIFT) -#define CHA_ID_JR_SHIFT 60 -#define CHA_ID_JR_MASK (0xfull << CHA_ID_JR_SHIFT) +#define CHA_ID_MS_JR_SHIFT 28 +#define CHA_ID_MS_JR_MASK (0xfull << CHA_ID_MS_JR_SHIFT) struct sec_vid { u16 ip_id; @@ -172,10 +172,12 @@ struct caam_perfmon { u64 rsvd[13]; /* CAAM Hardware Instantiation Parameters fa0-fbf */ - u64 cha_rev; /* CRNR - CHA Revision Number */ -#define CTPR_QI_SHIFT 57 -#define CTPR_QI_MASK (0x1ull << CTPR_QI_SHIFT) - u64 comp_parms; /* CTPR - Compile Parameters Register */ + u32 cha_rev_ms; /* CRNR - CHA Rev No. Most significant half*/ + u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/ +#define CTPR_MS_QI_SHIFT 25 +#define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT) + u32 comp_parms_ms; /* CTPR - Compile Parameters Register */ + u32 comp_parms_ls; /* CTPR - Compile Parameters Register */ u64 rsvd1[2]; /* CAAM Global Status fc0-fdf */ @@ -189,9 +191,12 @@ struct caam_perfmon { /* Component Instantiation Parameters fe0-fff */ u32 rtic_id; /* RVID - RTIC Version ID */ u32 ccb_id; /* CCBVID - CCB Version ID */ - u64 cha_id; /* CHAVID - CHA Version ID */ - u64 cha_num; /* CHANUM - CHA Number */ - u64 caam_id; /* CAAMVID - CAAM Version ID */ + u32 cha_id_ms; /* CHAVID - CHA Version ID Most Significant*/ + u32 cha_id_ls; /* CHAVID - CHA Version ID Least Significant*/ + u32 cha_num_ms; /* CHANUM - CHA Number Most Significant */ + u32 cha_num_ls; /* CHANUM - CHA Number Least Significant*/ + u32 caam_id_ms; /* CAAMVID - CAAM Version ID MS */ + u32 caam_id_ls; /* CAAMVID - CAAM Version ID LS */ }; /* LIODN programming for DMA configuration */ -- cgit v1.2.3 From 17157c90a8abf9323ee2a3daac7ad9f696642dda Mon Sep 17 00:00:00 2001 From: Ruchika Gupta Date: Mon, 23 Jun 2014 17:42:33 +0530 Subject: crypto: caam - Configuration for platforms with virtualization enabled in CAAM For platforms with virtualization enabled: 1. The job ring registers can be written to only if the job ring has been started, i.e. the STARTR bit in the JRSTART register is 1. 2. For DECOs under direct software control, PL, BMT, ICID and SDID values need to be provided. These are provided by selecting a job ring in start mode, whose parameters are then used for the DECO access programming.
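In shorthand, the virtualization detection added to caam_probe() reduces to the following (a condensed sketch of the ctrl.c hunk below, logically equivalent to it rather than a new implementation):

	comp_params = rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms);
	scfgr = rd_reg32(&topregs->ctrl.scfgr);

	if (comp_params & CTPR_MS_VIRT_EN_INCL)
		/* virtualization support included: on at POR or via SCFGR */
		ctrlpriv->virt_en = ((comp_params & CTPR_MS_VIRT_EN_POR) ||
				     (scfgr & SCFGR_VIRT_EN)) ? 1 : 0;
	else
		/* not included: the VIRT_EN_POR value alone decides */
		ctrlpriv->virt_en = !!(comp_params & CTPR_MS_VIRT_EN_POR);

	if (ctrlpriv->virt_en == 1)
		/* start all four job rings so their registers accept writes */
		setbits32(&topregs->ctrl.jrstart, JRSTART_JR0_START |
			  JRSTART_JR1_START | JRSTART_JR2_START |
			  JRSTART_JR3_START);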
Signed-off-by: Ruchika Gupta Signed-off-by: Herbert Xu --- drivers/crypto/caam/ctrl.c | 39 +++++++++++++++++++++++++++++++++++++++ drivers/crypto/caam/intern.h | 1 + drivers/crypto/caam/regs.h | 18 ++++++++++++++++-- 3 files changed, 56 insertions(+), 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 066a4d4b62bf..1d8e003d2582 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -88,6 +88,14 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc, /* Set the bit to request direct access to DECO0 */ topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; + + if (ctrlpriv->virt_en == 1) + setbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0); + + while (!(rd_reg32(&topregs->ctrl.deco_rsr) & DECORSR_VALID) && + --timeout) + cpu_relax(); + setbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE); while (!(rd_reg32(&topregs->ctrl.deco_rq) & DECORR_DEN0) && @@ -130,6 +138,9 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc, *status = rd_reg32(&topregs->deco.op_status_hi) & DECO_OP_STATUS_HI_ERR_MASK; + if (ctrlpriv->virt_en == 1) + clrbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0); + /* Mark the DECO as free */ clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE); @@ -378,6 +389,7 @@ static int caam_probe(struct platform_device *pdev) #ifdef CONFIG_DEBUG_FS struct caam_perfmon *perfmon; #endif + u32 scfgr, comp_params; u32 cha_vid_ls; ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(struct caam_drv_private), @@ -412,6 +424,33 @@ static int caam_probe(struct platform_device *pdev) setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE | (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0)); + /* + * Read the Compile Time paramters and SCFGR to determine + * if Virtualization is enabled for this platform + */ + comp_params = rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms); + scfgr = rd_reg32(&topregs->ctrl.scfgr); + + ctrlpriv->virt_en = 0; + if (comp_params & CTPR_MS_VIRT_EN_INCL) { + /* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or + * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SCFGR_VIRT_EN = 1 + */ + if ((comp_params & CTPR_MS_VIRT_EN_POR) || + (!(comp_params & CTPR_MS_VIRT_EN_POR) && + (scfgr & SCFGR_VIRT_EN))) + ctrlpriv->virt_en = 1; + } else { + /* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */ + if (comp_params & CTPR_MS_VIRT_EN_POR) + ctrlpriv->virt_en = 1; + } + + if (ctrlpriv->virt_en == 1) + setbits32(&topregs->ctrl.jrstart, JRSTART_JR0_START | + JRSTART_JR1_START | JRSTART_JR2_START | + JRSTART_JR3_START); + if (sizeof(dma_addr_t) == sizeof(u64)) if (of_device_is_compatible(nprop, "fsl,sec-v5.0")) dma_set_mask(dev, DMA_BIT_MASK(40)); diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index 6d85fcc5bd0a..97363db4e56e 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h @@ -82,6 +82,7 @@ struct caam_drv_private { u8 total_jobrs; /* Total Job Rings in device */ u8 qi_present; /* Nonzero if QI present in device */ int secvio_irq; /* Security violation interrupt number */ + int virt_en; /* Virtualization enabled in CAAM */ #define RNG4_MAX_HANDLES 2 /* RNG4 block */ diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 7bb898d2e699..69e3562dc7d1 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -176,6 +176,8 @@ struct caam_perfmon { u32 cha_rev_ls; /* CRNR - CHA Rev No. 
Least significant half*/ #define CTPR_MS_QI_SHIFT 25 #define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT) +#define CTPR_MS_VIRT_EN_INCL 0x00000001 +#define CTPR_MS_VIRT_EN_POR 0x00000002 u32 comp_parms_ms; /* CTPR - Compile Parameters Register */ u32 comp_parms_ls; /* CTPR - Compile Parameters Register */ u64 rsvd1[2]; @@ -309,9 +311,12 @@ struct caam_ctrl { /* Bus Access Configuration Section 010-11f */ /* Read/Writable */ struct masterid jr_mid[4]; /* JRxLIODNR - JobR LIODN setup */ - u32 rsvd3[12]; + u32 rsvd3[11]; + u32 jrstart; /* JRSTART - Job Ring Start Register */ struct masterid rtic_mid[4]; /* RTICxLIODNR - RTIC LIODN setup */ - u32 rsvd4[7]; + u32 rsvd4[5]; + u32 deco_rsr; /* DECORSR - Deco Request Source */ + u32 rsvd11; u32 deco_rq; /* DECORR - DECO Request */ struct partid deco_mid[5]; /* DECOxLIODNR - 1 per DECO */ u32 rsvd5[22]; @@ -352,7 +357,10 @@ struct caam_ctrl { #define MCFGR_DMA_RESET 0x10000000 #define MCFGR_LONG_PTR 0x00010000 /* Use >32-bit desc addressing */ #define SCFGR_RDBENABLE 0x00000400 +#define SCFGR_VIRT_EN 0x00008000 #define DECORR_RQD0ENABLE 0x00000001 /* Enable DECO0 for direct access */ +#define DECORSR_JR0 0x00000001 /* JR to supply TZ, SDID, ICID */ +#define DECORSR_VALID 0x80000000 #define DECORR_DEN0 0x00010000 /* DECO0 available for access*/ /* AXI read cache control */ @@ -370,6 +378,12 @@ struct caam_ctrl { #define MCFGR_AXIPRI 0x00000008 /* Assert AXI priority sideband */ #define MCFGR_BURST_64 0x00000001 /* Max burst size */ +/* JRSTART register offsets */ +#define JRSTART_JR0_START 0x00000001 /* Start Job ring 0 */ +#define JRSTART_JR1_START 0x00000002 /* Start Job ring 1 */ +#define JRSTART_JR2_START 0x00000004 /* Start Job ring 2 */ +#define JRSTART_JR3_START 0x00000008 /* Start Job ring 3 */ + /* * caam_job_ring - direct job ring setup * 1-4 possible per instantiation, base + 1000/2000/3000/4000 -- cgit v1.2.3 From ef94b1d834aace7101de77c3a7c2631b9ae9c5f6 Mon Sep 17 00:00:00 2001 From: Ruchika Gupta Date: Mon, 23 Jun 2014 18:49:30 +0530 Subject: crypto: caam - Add definition of rd/wr_reg64 for little endian platform The CAAM IP has certain 64-bit registers. 32-bit architectures cannot perform atomic 64-bit operations, so these registers are accessed as two 32-bit halves. This patch adds definitions of these split 64-bit accessors for little-endian platforms; the definitions which existed previously were for big-endian platforms only.
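On a 32-bit little-endian SoC the 64-bit write therefore becomes two 32-bit writes, with the most-significant word at the higher address. This is a condensed form of the regs.h hunk below (the plain "data >> 32" shift is equivalent to the masked shift the patch uses):

	static inline void wr_reg64(u64 __iomem *reg, u64 data)
	{
		wr_reg32((u32 __iomem *)reg + 1, data >> 32);       /* MS word */
		wr_reg32((u32 __iomem *)reg, data & 0xffffffffull); /* LS word */
	}

rd_reg64() mirrors this, reading the two halves and recombining them; neither accessor is atomic, they are simply consistent with the word order each endianness expects.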
Signed-off-by: Ruchika Gupta Signed-off-by: Herbert Xu --- drivers/crypto/caam/regs.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 69e3562dc7d1..f48e344ffc39 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -84,6 +84,7 @@ #endif #ifndef CONFIG_64BIT +#ifdef __BIG_ENDIAN static inline void wr_reg64(u64 __iomem *reg, u64 data) { wr_reg32((u32 __iomem *)reg, (data & 0xffffffff00000000ull) >> 32); @@ -95,6 +96,21 @@ static inline u64 rd_reg64(u64 __iomem *reg) return (((u64)rd_reg32((u32 __iomem *)reg)) << 32) | ((u64)rd_reg32((u32 __iomem *)reg + 1)); } +#else +#ifdef __LITTLE_ENDIAN +static inline void wr_reg64(u64 __iomem *reg, u64 data) +{ + wr_reg32((u32 __iomem *)reg + 1, (data & 0xffffffff00000000ull) >> 32); + wr_reg32((u32 __iomem *)reg, data & 0x00000000ffffffffull); +} + +static inline u64 rd_reg64(u64 __iomem *reg) +{ + return (((u64)rd_reg32((u32 __iomem *)reg + 1)) << 32) | + ((u64)rd_reg32((u32 __iomem *)reg)); +} +#endif +#endif #endif /* -- cgit v1.2.3 From 1da2be33ad4c30a2b1d5fe3053b5b7f63e6e2baa Mon Sep 17 00:00:00 2001 From: Ruchika Gupta Date: Mon, 23 Jun 2014 19:50:26 +0530 Subject: crypto: caam - Correct the dma mapping for sg table At a few places in caamhash and caamalg, after allocating a DMA-able buffer for the sg table, the buffer was being modified. As per the definition of DMA_FROM_DEVICE, after allocation the memory should be treated as read-only by the driver. This patch shifts the allocation of the DMA-able buffer for the sg table to after it is populated by the driver, making it read-only as per the DMA API's requirement. Signed-off-by: Ruchika Gupta Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 8 ++++---- drivers/crypto/caam/caamhash.c | 40 +++++++++++++++++++++++----------------- 2 files changed, 27 insertions(+), 21 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index c09ce1f040d3..87d9de48a39e 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -1345,8 +1345,6 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, edesc->sec4_sg_bytes = sec4_sg_bytes; edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + desc_bytes; - edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, - sec4_sg_bytes, DMA_TO_DEVICE); *all_contig_ptr = all_contig; sec4_sg_index = 0; @@ -1369,6 +1367,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, sg_to_sec4_sg_last(req->dst, dst_nents, edesc->sec4_sg + sec4_sg_index, 0); } + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, DMA_TO_DEVICE); return edesc; } @@ -1534,8 +1534,6 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request edesc->sec4_sg_bytes = sec4_sg_bytes; edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + desc_bytes; - edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, - sec4_sg_bytes, DMA_TO_DEVICE); *contig_ptr = contig; sec4_sg_index = 0; @@ -1559,6 +1557,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request sg_to_sec4_sg_last(req->dst, dst_nents, edesc->sec4_sg + sec4_sg_index, 0); } + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, DMA_TO_DEVICE); return edesc; } diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 0d9284ef96a8..2ab057b0a6d2 100644 --- a/drivers/crypto/caam/caamhash.c +++
b/drivers/crypto/caam/caamhash.c @@ -808,9 +808,6 @@ static int ahash_update_ctx(struct ahash_request *req) edesc->sec4_sg_bytes = sec4_sg_bytes; edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN; - edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, - sec4_sg_bytes, - DMA_TO_DEVICE); ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg, DMA_BIDIRECTIONAL); @@ -839,6 +836,10 @@ static int ahash_update_ctx(struct ahash_request *req) init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, + DMA_TO_DEVICE); + append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + to_hash, LDST_SGF); @@ -911,8 +912,6 @@ static int ahash_final_ctx(struct ahash_request *req) edesc->sec4_sg_bytes = sec4_sg_bytes; edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN; - edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, - sec4_sg_bytes, DMA_TO_DEVICE); edesc->src_nents = 0; ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg, @@ -923,6 +922,9 @@ static int ahash_final_ctx(struct ahash_request *req) last_buflen); (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN; + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, DMA_TO_DEVICE); + append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, LDST_SGF); @@ -989,8 +991,6 @@ static int ahash_finup_ctx(struct ahash_request *req) edesc->sec4_sg_bytes = sec4_sg_bytes; edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN; - edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, - sec4_sg_bytes, DMA_TO_DEVICE); ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg, DMA_TO_DEVICE); @@ -1002,6 +1002,9 @@ static int ahash_finup_ctx(struct ahash_request *req) src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + sec4_sg_src_index, chained); + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, DMA_TO_DEVICE); + append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen + req->nbytes, LDST_SGF); @@ -1056,8 +1059,6 @@ static int ahash_digest(struct ahash_request *req) } edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN; - edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, - sec4_sg_bytes, DMA_TO_DEVICE); edesc->src_nents = src_nents; edesc->chained = chained; @@ -1067,6 +1068,8 @@ static int ahash_digest(struct ahash_request *req) if (src_nents) { sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0); + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, DMA_TO_DEVICE); src_dma = edesc->sec4_sg_dma; options = LDST_SGF; } else { @@ -1197,9 +1200,6 @@ static int ahash_update_no_ctx(struct ahash_request *req) edesc->sec4_sg_bytes = sec4_sg_bytes; edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN; - edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, - sec4_sg_bytes, - DMA_TO_DEVICE); state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf, *buflen); @@ -1216,6 +1216,10 @@ static int ahash_update_no_ctx(struct ahash_request *req) init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, + DMA_TO_DEVICE); + append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); @@ -1297,8 +1301,6 @@ static int 
ahash_finup_no_ctx(struct ahash_request *req) edesc->sec4_sg_bytes = sec4_sg_bytes; edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN; - edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, - sec4_sg_bytes, DMA_TO_DEVICE); state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf, state->buf_dma, buflen, @@ -1307,6 +1309,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req) src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1, chained); + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, DMA_TO_DEVICE); + append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen + req->nbytes, LDST_SGF); @@ -1380,13 +1385,14 @@ static int ahash_update_first(struct ahash_request *req) edesc->sec4_sg_bytes = sec4_sg_bytes; edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN; - edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, - sec4_sg_bytes, - DMA_TO_DEVICE); if (src_nents) { sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0); + edesc->sec4_sg_dma = dma_map_single(jrdev, + edesc->sec4_sg, + sec4_sg_bytes, + DMA_TO_DEVICE); src_dma = edesc->sec4_sg_dma; options = LDST_SGF; } else { -- cgit v1.2.3 From bce3cc61d3d3e31cb7cd054bb35a3bdd0ec36c71 Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Tue, 24 Jun 2014 15:19:24 -0700 Subject: crypto: qat - Fix random config build warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix random config build warnings: Implicit-function-declaration ‘__raw_writel’ Cast to pointer from integer of different size [-Wint-to-pointer-cast] Reviewed-by: Bruce Allan Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/adf_accel_devices.h | 1 + drivers/crypto/qat/qat_common/qat_algs.c | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h index f3206d9a41ab..9282381b03ce 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h @@ -50,6 +50,7 @@ #include #include #include +#include #include "adf_cfg_common.h" #define PCI_VENDOR_ID_INTEL 0x8086 diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 193b753d6a0a..c4e80104dfe9 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -745,7 +745,7 @@ void qat_alg_callback(void *resp) { struct icp_qat_fw_la_resp *qat_resp = resp; struct qat_crypto_request *qat_req = - (void *)(dma_addr_t)qat_resp->opaque_data; + (void *)(__force long)qat_resp->opaque_data; struct qat_alg_session_ctx *ctx = qat_req->ctx; struct qat_crypto_instance *inst = ctx->inst; struct aead_request *areq = qat_req->areq; @@ -779,7 +779,7 @@ static int qat_alg_dec(struct aead_request *areq) *msg = ctx->dec_fw_req_tmpl; qat_req->ctx = ctx; qat_req->areq = areq; - qat_req->req.comn_mid.opaque_data = (uint64_t)(dma_addr_t)qat_req; + qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; cipher_param = (void *)&qat_req->req.serv_specif_rqpars; @@ -822,7 +822,7 @@ static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv, *msg = ctx->enc_fw_req_tmpl; qat_req->ctx = ctx; qat_req->areq = areq; - qat_req->req.comn_mid.opaque_data = 
(uint64_t)(dma_addr_t)qat_req; + qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; cipher_param = (void *)&qat_req->req.serv_specif_rqpars; -- cgit v1.2.3 From 83530d818f63b737694226fa7af7ae13b6e6c76b Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Tue, 24 Jun 2014 15:19:29 -0700 Subject: crypto: qat - Updated Firmware Info Metadata Update the firmware info metadata: select FW_LOADER and declare the firmware image the driver requests via MODULE_FIRMWARE. Reviewed-by: Bruce Allan Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu --- drivers/crypto/qat/Kconfig | 1 + drivers/crypto/qat/qat_dh895xcc/adf_drv.c | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig index 3bd3fd968de1..49bede2a9f77 100644 --- a/drivers/crypto/qat/Kconfig +++ b/drivers/crypto/qat/Kconfig @@ -8,6 +8,7 @@ config CRYPTO_DEV_QAT select CRYPTO_SHA1 select CRYPTO_SHA256 select CRYPTO_SHA512 + select FW_LOADER config CRYPTO_DEV_QAT_DH895xCC tristate "Support for Intel(R) DH895xCC" diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c index 1da2f78b2058..0d0435a41be9 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c @@ -445,4 +445,5 @@ module_exit(adfdrv_release); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Intel"); +MODULE_FIRMWARE("qat_895xcc.bin"); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); -- cgit v1.2.3 From d65071ecde1ed1b99d057a877e0e3d29748c3a4d Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Tue, 24 Jun 2014 15:19:34 -0700 Subject: crypto: qat - Fixed new checkpatch warnings After updates to checkpatch, new warnings pop up; this patch fixes them.
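Two of the new warnings are worth a quick illustration; the sketch below is illustrative only (WR_REG and alloc_words are made-up names, not taken from the driver). One warning flags a trailing semicolon in a macro definition, which breaks the macro inside if/else chains (the arbiter CSR macros further below get exactly this fix); another flags open-coded allocation-size multiplies, which can overflow, hence kmalloc_array()/kcalloc():

#include <linux/io.h>
#include <linux/slab.h>

/* No trailing semicolon here: with one, "if (x) WR_REG(...); else ..."
 * would expand to an empty extra statement and orphan the else. */
#define WR_REG(base, idx, val) \
	writel((val), (base) + 4 * (idx))

static u64 *alloc_words(unsigned int n)
{
	/* kmalloc(n * sizeof(u64)) can overflow silently;
	 * kmalloc_array() returns NULL when n * size would wrap. */
	return kmalloc_array(n, sizeof(u64), GFP_KERNEL);
}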
Signed-off-by: Bruce Allan Acked-by: Tadeusz Struk Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/adf_aer.c | 1 + drivers/crypto/qat/qat_common/adf_cfg.c | 3 +++ drivers/crypto/qat/qat_common/adf_transport.c | 2 ++ drivers/crypto/qat/qat_common/adf_transport_debug.c | 3 +++ drivers/crypto/qat/qat_common/qat_algs.c | 4 ++++ drivers/crypto/qat/qat_common/qat_crypto.c | 4 ++-- drivers/crypto/qat/qat_common/qat_hal.c | 7 +++++-- drivers/crypto/qat/qat_common/qat_uclo.c | 17 ++++++++++------- drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c | 12 ++++++------ drivers/crypto/qat/qat_dh895xcc/adf_isr.c | 2 +- 10 files changed, 37 insertions(+), 18 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c index 0651678e2345..c29d4c3926bf 100644 --- a/drivers/crypto/qat/qat_common/adf_aer.c +++ b/drivers/crypto/qat/qat_common/adf_aer.c @@ -95,6 +95,7 @@ static void adf_dev_restore(struct adf_accel_dev *accel_dev) pending = ppdstat & PCI_EXP_DEVSTA_TRPND; if (pending) { int ctr = 0; + do { msleep(100); pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat); diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c index 389c0cf28e70..42790760ade1 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg.c +++ b/drivers/crypto/qat/qat_common/adf_cfg.c @@ -57,6 +57,7 @@ static DEFINE_MUTEX(qat_cfg_read_lock); static void *qat_dev_cfg_start(struct seq_file *sfile, loff_t *pos) { struct adf_cfg_device_data *dev_cfg = sfile->private; + mutex_lock(&qat_cfg_read_lock); return seq_list_start(&dev_cfg->sec_list, *pos); } @@ -79,6 +80,7 @@ static int qat_dev_cfg_show(struct seq_file *sfile, void *v) static void *qat_dev_cfg_next(struct seq_file *sfile, void *v, loff_t *pos) { struct adf_cfg_device_data *dev_cfg = sfile->private; + return seq_list_next(v, &dev_cfg->sec_list, pos); } @@ -100,6 +102,7 @@ static int qat_dev_cfg_open(struct inode *inode, struct file *file) if (!ret) { struct seq_file *seq_f = file->private_data; + seq_f->private = inode->i_private; } return ret; diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c index b21301056e3f..5f3fa45348b4 100644 --- a/drivers/crypto/qat/qat_common/adf_transport.c +++ b/drivers/crypto/qat/qat_common/adf_transport.c @@ -55,6 +55,7 @@ static inline uint32_t adf_modulo(uint32_t data, uint32_t shift) { uint32_t div = data >> shift; uint32_t mult = div << shift; + return data - mult; } @@ -68,6 +69,7 @@ static inline int adf_check_ring_alignment(uint64_t addr, uint64_t size) static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num) { int i = ADF_MIN_RING_SIZE; + for (; i <= ADF_MAX_RING_SIZE; i++) if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) return i; diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c index d83a581a9135..6b6974553514 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_debug.c +++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c @@ -57,6 +57,7 @@ static DEFINE_MUTEX(bank_read_lock); static void *adf_ring_start(struct seq_file *sfile, loff_t *pos) { struct adf_etr_ring_data *ring = sfile->private; + mutex_lock(&ring_read_lock); if (*pos == 0) return SEQ_START_TOKEN; @@ -143,6 +144,7 @@ static int adf_ring_open(struct inode *inode, struct file *file) if (!ret) { struct seq_file *seq_f = file->private_data; + seq_f->private = inode->i_private; } return ret; @@ -258,6 +260,7 
@@ static int adf_bank_open(struct inode *inode, struct file *file) if (!ret) { struct seq_file *seq_f = file->private_data; + seq_f->private = inode->i_private; } return ret; diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index c4e80104dfe9..946686f83660 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -610,6 +610,7 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst, if (blp != blpout) { /* If out of place operation dma unmap only data */ int bufless = bufs - blout->num_mapped_bufs; + for (i = bufless; i < bufs; i++) { dma_unmap_single(dev, blout->bufers[i].addr, blout->bufers[i].len, @@ -667,6 +668,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, for_each_sg(sgl, sg, n, i) { int y = i + bufs; + bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg), sg->length, DMA_BIDIRECTIONAL); @@ -698,6 +700,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, } for_each_sg(sglout, sg, n, i) { int y = i + bufs; + bufers[y].addr = dma_map_single(dev, sg_virt(sg), sg->length, DMA_BIDIRECTIONAL); @@ -729,6 +732,7 @@ err: if (sgl != sglout && buflout) { for_each_sg(sglout, sg, n, i) { int y = i + bufs; + if (!dma_mapping_error(dev, buflout->bufers[y].addr)) dma_unmap_single(dev, buflout->bufers[y].addr, buflout->bufers[y].len, diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c index cc562cb67e37..0d59bcb50de1 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.c +++ b/drivers/crypto/qat/qat_common/qat_crypto.c @@ -146,8 +146,8 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node) static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev) { int i; - long unsigned int bank; - long unsigned int num_inst, num_msg_sym, num_msg_asym; + unsigned long bank; + unsigned long num_inst, num_msg_sym, num_msg_asym; int msg_size; struct qat_crypto_instance *inst; char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c index 260cd7141e74..903ca88109f7 100644 --- a/drivers/crypto/qat/qat_common/qat_hal.c +++ b/drivers/crypto/qat/qat_common/qat_hal.c @@ -266,6 +266,7 @@ static unsigned short qat_hal_get_reg_addr(unsigned int type, unsigned short reg_num) { unsigned short reg_addr; + switch (type) { case ICP_GPA_ABS: case ICP_GPB_ABS: @@ -816,6 +817,7 @@ void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); for (i = 0; i < words_num; i++) { unsigned int uwrd_lo, uwrd_hi, tmp; + uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) | ((data[i] & 0xff00) << 2) | (0x3 << 8) | (data[i] & 0xff); @@ -888,6 +890,7 @@ static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle, return -EFAULT; if (endpc) { unsigned int ctx_status; + qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, &ctx_status); *endpc = ctx_status & handle->hal_handle->upc_mask; @@ -1111,8 +1114,8 @@ int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle, alloc_inst_size = lm_init_header->size; if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore) alloc_inst_size = handle->hal_handle->max_ustore; - micro_inst_arry = kmalloc(alloc_inst_size * sizeof(uint64_t), - GFP_KERNEL); + micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(uint64_t), + GFP_KERNEL); if (!micro_inst_arry) return -ENOMEM; micro_inst_num = 0; diff --git 
a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index a698d07ef97b..20b6b4269ca7 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -171,7 +171,6 @@ static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle, num_in_bytes -= 4; ptr += 4; } - return; } static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle, @@ -186,6 +185,7 @@ static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle, umem_init = umem_init_header->next; while (umem_init) { unsigned int addr, *value, size; + ae = umem_init->ae; addr = umem_init->addr; value = umem_init->value; @@ -204,6 +204,7 @@ qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle, umem_init = *base; while (umem_init) { struct icp_qat_uof_batch_init *pre; + pre = umem_init; umem_init = umem_init->next; kfree(pre); @@ -414,7 +415,7 @@ static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle, uint64_t *fill_data; uof_image = image->img_ptr; - fill_data = kzalloc(ICP_QAT_UCLO_MAX_USTORE * sizeof(uint64_t), + fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t), GFP_KERNEL); if (!fill_data) return -EFAULT; @@ -720,6 +721,7 @@ qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr, obj_hdr->file_buff, tab_name, NULL); if (chunk_hdr) { int hdr_size; + memcpy(&str_table->table_len, obj_hdr->file_buff + chunk_hdr->offset, sizeof(str_table->table_len)); hdr_size = (char *)&str_table->strings - (char *)str_table; @@ -821,6 +823,7 @@ static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle, for (i = 0; i < encap_ae->init_regsym_num; i++) { unsigned int exp_res; + init_regsym = &encap_ae->init_regsym[i]; exp_res = init_regsym->value; switch (init_regsym->init_type) { @@ -949,7 +952,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; unsigned int ae; - obj_handle->uword_buf = kzalloc(UWORD_CPYBUF_SIZE * sizeof(uint64_t), + obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t), GFP_KERNEL); if (!obj_handle->uword_buf) return -ENOMEM; @@ -957,20 +960,20 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *) obj_handle->obj_hdr->file_buff; obj_handle->encap_uof_obj.chunk_hdr = (struct icp_qat_uof_chunkhdr *) - obj_handle->obj_hdr->file_buff + sizeof(struct icp_qat_uof_objhdr); + (obj_handle->obj_hdr->file_buff + sizeof(struct icp_qat_uof_objhdr)); obj_handle->uword_in_bytes = 6; obj_handle->prod_type = ICP_QAT_AC_C_CPU_TYPE; obj_handle->prod_rev = PID_MAJOR_REV | (PID_MINOR_REV & handle->hal_handle->revision_id); if (qat_uclo_check_uof_compat(obj_handle)) { - pr_err("QAT: uof incompatible\n "); + pr_err("QAT: uof incompatible\n"); return -EINVAL; } obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE; if (!(obj_handle->obj_hdr->file_buff) || !(qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT, &(obj_handle->str_table)))) { - pr_err("QAT: uof doesn't have effective images"); + pr_err("QAT: uof doesn't have effective images\n"); goto out_err; } obj_handle->uimage_num = @@ -979,7 +982,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) if (!obj_handle->uimage_num) goto out_err; if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) { - pr_err("QAT: Bad object\n "); + pr_err("QAT: Bad object\n"); goto out_check_uof_aemask_err; } 
qat_uclo_init_uword_num(handle); diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c b/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c index 36ffa70211c9..1864bdb36f8f 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c @@ -63,29 +63,29 @@ #define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \ ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \ - (ADF_ARB_REG_SLOT * index), value); + (ADF_ARB_REG_SLOT * index), value) #define WRITE_CSR_ARB_RESPORDERING(csr_addr, index, value) \ ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \ - ADF_ARB_RO_EN_OFFSET) + (ADF_ARB_REG_SIZE * index), value); + ADF_ARB_RO_EN_OFFSET) + (ADF_ARB_REG_SIZE * index), value) #define WRITE_CSR_ARB_WEIGHT(csr_addr, arb, index, value) \ ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \ ADF_ARB_WTR_OFFSET) + (ADF_ARB_WTR_SIZE * arb) + \ - (ADF_ARB_REG_SIZE * index), value); + (ADF_ARB_REG_SIZE * index), value) #define WRITE_CSR_ARB_SARCONFIG(csr_addr, index, value) \ ADF_CSR_WR(csr_addr, ADF_ARB_OFFSET + \ - (ADF_ARB_REG_SIZE * index), value); + (ADF_ARB_REG_SIZE * index), value) #define WRITE_CSR_ARB_WRK_2_SER_MAP(csr_addr, index, value) \ ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \ ADF_ARB_WRK_2_SER_MAP_OFFSET) + \ - (ADF_ARB_REG_SIZE * index), value); + (ADF_ARB_REG_SIZE * index), value) #define WRITE_CSR_ARB_WQCFG(csr_addr, index, value) \ ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \ - ADF_ARB_WQCFG_OFFSET) + (ADF_ARB_REG_SIZE * index), value); + ADF_ARB_WQCFG_OFFSET) + (ADF_ARB_REG_SIZE * index), value) int adf_init_arb(struct adf_accel_dev *accel_dev) { diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c index 77e2d3a573f4..d4172dedf775 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c @@ -172,7 +172,7 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev) if (!entries) return -ENOMEM; - names = kzalloc(msix_num_entries * sizeof(char *), GFP_KERNEL); + names = kcalloc(msix_num_entries, sizeof(char *), GFP_KERNEL); if (!names) { kfree(entries); return -ENOMEM; -- cgit v1.2.3 From 8f312d64b5eea5c1f807265c1010969a0cb4b876 Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Tue, 24 Jun 2014 15:19:40 -0700 Subject: crypto: qat - Fix error path crash when no firmware is present Firmware loader crashes when no firmware file is present. 
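The crash is the familiar teardown-after-failed-setup pattern: the unload path runs even though nothing was ever loaded. A minimal sketch of the defensive shape the fix gives qat_uclo_del_uof_obj() (the types here are simplified stand-ins, not the driver's own):

#include <linux/slab.h>

struct obj_state {
	void *obj_hdr;
	void *obj_buf;
};

struct loader_handle {
	struct obj_state *obj_handle;
};

static void del_uof_obj(struct loader_handle *handle)
{
	struct obj_state *obj = handle->obj_handle;

	/* The load step never populated the handle (e.g. the firmware
	 * file was missing), so return instead of dereferencing NULL. */
	if (!obj)
		return;

	kfree(obj->obj_hdr);
	kfree(obj->obj_buf);
	kfree(obj);
	handle->obj_handle = NULL;
}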
Reviewed-by: Bruce Allan Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/adf_common_drv.h | 2 +- drivers/crypto/qat/qat_common/qat_uclo.c | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index 3cea9fa26158..5e8f9d431e5d 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h @@ -186,7 +186,7 @@ int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle, int qat_hal_wr_lm(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned short lm_addr, unsigned int value); int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle); -int qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle); +void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle); int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, void *addr_ptr, int mem_size); #endif diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index 20b6b4269ca7..dd4e0d3c323a 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -959,8 +959,6 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff; obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *) obj_handle->obj_hdr->file_buff; - obj_handle->encap_uof_obj.chunk_hdr = (struct icp_qat_uof_chunkhdr *) - (obj_handle->obj_hdr->file_buff + sizeof(struct icp_qat_uof_objhdr)); obj_handle->uword_in_bytes = 6; obj_handle->prod_type = ICP_QAT_AC_C_CPU_TYPE; obj_handle->prod_rev = PID_MAJOR_REV | @@ -1040,23 +1038,25 @@ out_objbuf_err: return -ENOMEM; } -int qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle) +void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle) { struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; int a; + if (!obj_handle) + return; + kfree(obj_handle->uword_buf); for (a = 0; a < obj_handle->uimage_num; a++) kfree(obj_handle->ae_uimage[a].page); for (a = 0; a <= (int)handle->hal_handle->ae_max_num; a++) qat_uclo_free_ae_data(&obj_handle->ae_data[a]); - kfree(obj_handle->obj_hdr); + kfree(obj_handle->obj_hdr); kfree(obj_handle->obj_buf); kfree(obj_handle); handle->obj_handle = NULL; - return 0; } static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle, -- cgit v1.2.3 From ec8f5d8f6f76b939f662d6e83041abecabef0a34 Mon Sep 17 00:00:00 2001 From: Stanimir Varbanov Date: Wed, 25 Jun 2014 19:28:57 +0300 Subject: crypto: qce - Qualcomm crypto engine driver The driver is split into functional parts. The core part implements the platform driver probe and remove callbacks. The probe function enables the clocks, checks the crypto version, initializes and requests the DMA channels, creates the done tasklet, initializes the crypto queue and finally registers the algorithms with the crypto core subsystem. - DMA and SG helper functions: dmaengine and sg-list helper functions used by the other parts of the crypto driver. - ablkcipher algorithms: implementation of the AES, DES and 3DES crypto API callbacks, the crypto alg registration function, the async request handler and its DMA done callback function. - SHA and HMAC transforms: implementation and registration of the ahash crypto type. It includes sha1, sha256, hmac(sha1) and hmac(sha256).
- infrastructure to set up the crypto hardware: the functions used to set up/prepare the hardware registers for all algorithms supported by the crypto block. It also exports a few helper functions needed by the algorithms: - to check the hardware status - to start the crypto hardware - to translate a data stream to big endian form It also adds the register addresses and bit masks used by the driver. Signed-off-by: Stanimir Varbanov Signed-off-by: Herbert Xu --- drivers/crypto/qce/ablkcipher.c | 431 +++++++++++++++++++++++++++++ drivers/crypto/qce/cipher.h | 68 +++++ drivers/crypto/qce/common.c | 435 +++++++++++++++++++++++++++++ drivers/crypto/qce/common.h | 102 +++++++ drivers/crypto/qce/core.c | 286 +++++++++++++++++++ drivers/crypto/qce/core.h | 68 +++++ drivers/crypto/qce/dma.c | 186 +++++++++++++ drivers/crypto/qce/dma.h | 58 ++++ drivers/crypto/qce/regs-v5.h | 334 +++++++++++++++++++++++ drivers/crypto/qce/sha.c | 588 ++++++++++++++++++++++++++++++++++++++++ drivers/crypto/qce/sha.h | 81 ++++++ 11 files changed, 2637 insertions(+) create mode 100644 drivers/crypto/qce/ablkcipher.c create mode 100644 drivers/crypto/qce/cipher.h create mode 100644 drivers/crypto/qce/common.c create mode 100644 drivers/crypto/qce/common.h create mode 100644 drivers/crypto/qce/core.c create mode 100644 drivers/crypto/qce/core.h create mode 100644 drivers/crypto/qce/dma.c create mode 100644 drivers/crypto/qce/dma.h create mode 100644 drivers/crypto/qce/regs-v5.h create mode 100644 drivers/crypto/qce/sha.c create mode 100644 drivers/crypto/qce/sha.h (limited to 'drivers/crypto') diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c new file mode 100644 index 000000000000..ad592de475a4 --- /dev/null +++ b/drivers/crypto/qce/ablkcipher.c @@ -0,0 +1,431 @@ +/* + * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include + +#include "cipher.h" + +static LIST_HEAD(ablkcipher_algs); + +static void qce_ablkcipher_done(void *data) +{ + struct crypto_async_request *async_req = data; + struct ablkcipher_request *req = ablkcipher_request_cast(async_req); + struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); + struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm); + struct qce_device *qce = tmpl->qce; + enum dma_data_direction dir_src, dir_dst; + u32 status; + int error; + bool diff_dst; + + diff_dst = (req->src != req->dst) ? true : false; + dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; + dir_dst = diff_dst ?
DMA_FROM_DEVICE : DMA_BIDIRECTIONAL; + + error = qce_dma_terminate_all(&qce->dma); + if (error) + dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n", + error); + + if (diff_dst) + qce_unmapsg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src, + rctx->dst_chained); + qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst, + rctx->dst_chained); + + sg_free_table(&rctx->dst_tbl); + + error = qce_check_status(qce, &status); + if (error < 0) + dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status); + + qce->async_req_done(tmpl->qce, error); +} + +static int +qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req) +{ + struct ablkcipher_request *req = ablkcipher_request_cast(async_req); + struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); + struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm); + struct qce_device *qce = tmpl->qce; + enum dma_data_direction dir_src, dir_dst; + struct scatterlist *sg; + bool diff_dst; + gfp_t gfp; + int ret; + + rctx->iv = req->info; + rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher); + rctx->cryptlen = req->nbytes; + + diff_dst = (req->src != req->dst) ? true : false; + dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; + dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL; + + rctx->src_nents = qce_countsg(req->src, req->nbytes, + &rctx->src_chained); + if (diff_dst) { + rctx->dst_nents = qce_countsg(req->dst, req->nbytes, + &rctx->dst_chained); + } else { + rctx->dst_nents = rctx->src_nents; + rctx->dst_chained = rctx->src_chained; + } + + rctx->dst_nents += 1; + + gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC; + + ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp); + if (ret) + return ret; + + sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ); + + sg = qce_sgtable_add(&rctx->dst_tbl, req->dst); + if (IS_ERR(sg)) { + ret = PTR_ERR(sg); + goto error_free; + } + + sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg); + if (IS_ERR(sg)) { + ret = PTR_ERR(sg); + goto error_free; + } + + sg_mark_end(sg); + rctx->dst_sg = rctx->dst_tbl.sgl; + + ret = qce_mapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst, + rctx->dst_chained); + if (ret < 0) + goto error_free; + + if (diff_dst) { + ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, dir_src, + rctx->src_chained); + if (ret < 0) + goto error_unmap_dst; + rctx->src_sg = req->src; + } else { + rctx->src_sg = rctx->dst_sg; + } + + ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents, + rctx->dst_sg, rctx->dst_nents, + qce_ablkcipher_done, async_req); + if (ret) + goto error_unmap_src; + + qce_dma_issue_pending(&qce->dma); + + ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0); + if (ret) + goto error_terminate; + + return 0; + +error_terminate: + qce_dma_terminate_all(&qce->dma); +error_unmap_src: + if (diff_dst) + qce_unmapsg(qce->dev, req->src, rctx->src_nents, dir_src, + rctx->src_chained); +error_unmap_dst: + qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst, + rctx->dst_chained); +error_free: + sg_free_table(&rctx->dst_tbl); + return ret; +} + +static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key, + unsigned int keylen) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk); + struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + unsigned long flags = to_cipher_tmpl(tfm)->alg_flags; + int ret; + + if (!key || !keylen) + return -EINVAL; + + if 
(IS_AES(flags)) { + switch (keylen) { + case AES_KEYSIZE_128: + case AES_KEYSIZE_256: + break; + default: + goto fallback; + } + } else if (IS_DES(flags)) { + u32 tmp[DES_EXPKEY_WORDS]; + + ret = des_ekey(tmp, key); + if (!ret && crypto_ablkcipher_get_flags(ablk) & + CRYPTO_TFM_REQ_WEAK_KEY) + goto weakkey; + } + + ctx->enc_keylen = keylen; + memcpy(ctx->enc_key, key, keylen); + return 0; +fallback: + ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen); + if (!ret) + ctx->enc_keylen = keylen; + return ret; +weakkey: + crypto_ablkcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY); + return -EINVAL; +} + +static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt) +{ + struct crypto_tfm *tfm = + crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); + struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); + struct qce_alg_template *tmpl = to_cipher_tmpl(tfm); + int ret; + + rctx->flags = tmpl->alg_flags; + rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT; + + if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 && + ctx->enc_keylen != AES_KEYSIZE_256) { + ablkcipher_request_set_tfm(req, ctx->fallback); + ret = encrypt ? crypto_ablkcipher_encrypt(req) : + crypto_ablkcipher_decrypt(req); + ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); + return ret; + } + + return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base); +} + +static int qce_ablkcipher_encrypt(struct ablkcipher_request *req) +{ + return qce_ablkcipher_crypt(req, 1); +} + +static int qce_ablkcipher_decrypt(struct ablkcipher_request *req) +{ + return qce_ablkcipher_crypt(req, 0); +} + +static int qce_ablkcipher_init(struct crypto_tfm *tfm) +{ + struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + memset(ctx, 0, sizeof(*ctx)); + tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx); + + ctx->fallback = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm), + CRYPTO_ALG_TYPE_ABLKCIPHER, + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->fallback)) + return PTR_ERR(ctx->fallback); + + return 0; +} + +static void qce_ablkcipher_exit(struct crypto_tfm *tfm) +{ + struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_free_ablkcipher(ctx->fallback); +} + +struct qce_ablkcipher_def { + unsigned long flags; + const char *name; + const char *drv_name; + unsigned int blocksize; + unsigned int ivsize; + unsigned int min_keysize; + unsigned int max_keysize; +}; + +static const struct qce_ablkcipher_def ablkcipher_def[] = { + { + .flags = QCE_ALG_AES | QCE_MODE_ECB, + .name = "ecb(aes)", + .drv_name = "ecb-aes-qce", + .blocksize = AES_BLOCK_SIZE, + .ivsize = AES_BLOCK_SIZE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + }, + { + .flags = QCE_ALG_AES | QCE_MODE_CBC, + .name = "cbc(aes)", + .drv_name = "cbc-aes-qce", + .blocksize = AES_BLOCK_SIZE, + .ivsize = AES_BLOCK_SIZE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + }, + { + .flags = QCE_ALG_AES | QCE_MODE_CTR, + .name = "ctr(aes)", + .drv_name = "ctr-aes-qce", + .blocksize = AES_BLOCK_SIZE, + .ivsize = AES_BLOCK_SIZE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + }, + { + .flags = QCE_ALG_AES | QCE_MODE_XTS, + .name = "xts(aes)", + .drv_name = "xts-aes-qce", + .blocksize = AES_BLOCK_SIZE, + .ivsize = AES_BLOCK_SIZE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + }, + { + .flags = QCE_ALG_DES | QCE_MODE_ECB, + .name = "ecb(des)", + .drv_name = "ecb-des-qce", + 
.blocksize = DES_BLOCK_SIZE, + .ivsize = 0, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + }, + { + .flags = QCE_ALG_DES | QCE_MODE_CBC, + .name = "cbc(des)", + .drv_name = "cbc-des-qce", + .blocksize = DES_BLOCK_SIZE, + .ivsize = DES_BLOCK_SIZE, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + }, + { + .flags = QCE_ALG_3DES | QCE_MODE_ECB, + .name = "ecb(des3_ede)", + .drv_name = "ecb-3des-qce", + .blocksize = DES3_EDE_BLOCK_SIZE, + .ivsize = 0, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + }, + { + .flags = QCE_ALG_3DES | QCE_MODE_CBC, + .name = "cbc(des3_ede)", + .drv_name = "cbc-3des-qce", + .blocksize = DES3_EDE_BLOCK_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + }, +}; + +static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def, + struct qce_device *qce) +{ + struct qce_alg_template *tmpl; + struct crypto_alg *alg; + int ret; + + tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL); + if (!tmpl) + return -ENOMEM; + + alg = &tmpl->alg.crypto; + + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->drv_name); + + alg->cra_blocksize = def->blocksize; + alg->cra_ablkcipher.ivsize = def->ivsize; + alg->cra_ablkcipher.min_keysize = def->min_keysize; + alg->cra_ablkcipher.max_keysize = def->max_keysize; + alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey; + alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt; + alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt; + + alg->cra_priority = 300; + alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK; + alg->cra_ctxsize = sizeof(struct qce_cipher_ctx); + alg->cra_alignmask = 0; + alg->cra_type = &crypto_ablkcipher_type; + alg->cra_module = THIS_MODULE; + alg->cra_init = qce_ablkcipher_init; + alg->cra_exit = qce_ablkcipher_exit; + INIT_LIST_HEAD(&alg->cra_list); + + INIT_LIST_HEAD(&tmpl->entry); + tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER; + tmpl->alg_flags = def->flags; + tmpl->qce = qce; + + ret = crypto_register_alg(alg); + if (ret) { + kfree(tmpl); + dev_err(qce->dev, "%s registration failed\n", alg->cra_name); + return ret; + } + + list_add_tail(&tmpl->entry, &ablkcipher_algs); + dev_dbg(qce->dev, "%s is registered\n", alg->cra_name); + return 0; +} + +static void qce_ablkcipher_unregister(struct qce_device *qce) +{ + struct qce_alg_template *tmpl, *n; + + list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) { + crypto_unregister_alg(&tmpl->alg.crypto); + list_del(&tmpl->entry); + kfree(tmpl); + } +} + +static int qce_ablkcipher_register(struct qce_device *qce) +{ + int ret, i; + + for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) { + ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce); + if (ret) + goto err; + } + + return 0; +err: + qce_ablkcipher_unregister(qce); + return ret; +} + +const struct qce_algo_ops ablkcipher_ops = { + .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .register_algs = qce_ablkcipher_register, + .unregister_algs = qce_ablkcipher_unregister, + .async_req_handle = qce_ablkcipher_async_req_handle, +}; diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h new file mode 100644 index 000000000000..d5757cfcda2d --- /dev/null +++ b/drivers/crypto/qce/cipher.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CIPHER_H_ +#define _CIPHER_H_ + +#include "common.h" +#include "core.h" + +#define QCE_MAX_KEY_SIZE 64 + +struct qce_cipher_ctx { + u8 enc_key[QCE_MAX_KEY_SIZE]; + unsigned int enc_keylen; + struct crypto_ablkcipher *fallback; +}; + +/** + * struct qce_cipher_reqctx - holds private cipher objects per request + * @flags: operation flags + * @iv: pointer to the IV + * @ivsize: IV size + * @src_nents: source entries + * @dst_nents: destination entries + * @src_chained: is source chained + * @dst_chained: is destination chained + * @result_sg: scatterlist used for result buffer + * @dst_tbl: destination sg table + * @dst_sg: destination sg pointer table beginning + * @src_tbl: source sg table + * @src_sg: source sg pointer table beginning; + * @cryptlen: crypto length + */ +struct qce_cipher_reqctx { + unsigned long flags; + u8 *iv; + unsigned int ivsize; + int src_nents; + int dst_nents; + bool src_chained; + bool dst_chained; + struct scatterlist result_sg; + struct sg_table dst_tbl; + struct scatterlist *dst_sg; + struct sg_table src_tbl; + struct scatterlist *src_sg; + unsigned int cryptlen; +}; + +static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_tfm *tfm) +{ + struct crypto_alg *alg = tfm->__crt_alg; + return container_of(alg, struct qce_alg_template, alg.crypto); +} + +extern const struct qce_algo_ops ablkcipher_ops; + +#endif /* _CIPHER_H_ */ diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c new file mode 100644 index 000000000000..1cd4d5ea8114 --- /dev/null +++ b/drivers/crypto/qce/common.c @@ -0,0 +1,435 @@ +/* + * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include + +#include "cipher.h" +#include "common.h" +#include "core.h" +#include "regs-v5.h" +#include "sha.h" + +#define QCE_SECTOR_SIZE 512 + +static inline u32 qce_read(struct qce_device *qce, u32 offset) +{ + return readl(qce->base + offset); +} + +static inline void qce_write(struct qce_device *qce, u32 offset, u32 val) +{ + writel(val, qce->base + offset); +} + +static inline void qce_write_array(struct qce_device *qce, u32 offset, + const u32 *val, unsigned int len) +{ + int i; + + for (i = 0; i < len; i++) + qce_write(qce, offset + i * sizeof(u32), val[i]); +} + +static inline void +qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len) +{ + int i; + + for (i = 0; i < len; i++) + qce_write(qce, offset + i * sizeof(u32), 0); +} + +static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size) +{ + u32 cfg = 0; + + if (IS_AES(flags)) { + if (aes_key_size == AES_KEYSIZE_128) + cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT; + else if (aes_key_size == AES_KEYSIZE_256) + cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT; + } + + if (IS_AES(flags)) + cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT; + else if (IS_DES(flags) || IS_3DES(flags)) + cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT; + + if (IS_DES(flags)) + cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT; + + if (IS_3DES(flags)) + cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT; + + switch (flags & QCE_MODE_MASK) { + case QCE_MODE_ECB: + cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT; + break; + case QCE_MODE_CBC: + cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT; + break; + case QCE_MODE_CTR: + cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT; + break; + case QCE_MODE_XTS: + cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT; + break; + case QCE_MODE_CCM: + cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT; + cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT; + break; + default: + return ~0; + } + + return cfg; +} + +static u32 qce_auth_cfg(unsigned long flags, u32 key_size) +{ + u32 cfg = 0; + + if (IS_AES(flags) && (IS_CCM(flags) || IS_CMAC(flags))) + cfg |= AUTH_ALG_AES << AUTH_ALG_SHIFT; + else + cfg |= AUTH_ALG_SHA << AUTH_ALG_SHIFT; + + if (IS_CCM(flags) || IS_CMAC(flags)) { + if (key_size == AES_KEYSIZE_128) + cfg |= AUTH_KEY_SZ_AES128 << AUTH_KEY_SIZE_SHIFT; + else if (key_size == AES_KEYSIZE_256) + cfg |= AUTH_KEY_SZ_AES256 << AUTH_KEY_SIZE_SHIFT; + } + + if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) + cfg |= AUTH_SIZE_SHA1 << AUTH_SIZE_SHIFT; + else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) + cfg |= AUTH_SIZE_SHA256 << AUTH_SIZE_SHIFT; + else if (IS_CMAC(flags)) + cfg |= AUTH_SIZE_ENUM_16_BYTES << AUTH_SIZE_SHIFT; + + if (IS_SHA1(flags) || IS_SHA256(flags)) + cfg |= AUTH_MODE_HASH << AUTH_MODE_SHIFT; + else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags) || + IS_CBC(flags) || IS_CTR(flags)) + cfg |= AUTH_MODE_HMAC << AUTH_MODE_SHIFT; + else if (IS_AES(flags) && IS_CCM(flags)) + cfg |= AUTH_MODE_CCM << AUTH_MODE_SHIFT; + else if (IS_AES(flags) && IS_CMAC(flags)) + cfg |= AUTH_MODE_CMAC << AUTH_MODE_SHIFT; + + if (IS_SHA(flags) || IS_SHA_HMAC(flags)) + cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT; + + if (IS_CCM(flags)) + cfg |= QCE_MAX_NONCE_WORDS << AUTH_NONCE_NUM_WORDS_SHIFT; + + if (IS_CBC(flags) || IS_CTR(flags) || IS_CCM(flags) || + IS_CMAC(flags)) + cfg |= BIT(AUTH_LAST_SHIFT) | BIT(AUTH_FIRST_SHIFT); + + return cfg; +} + +static u32 qce_config_reg(struct qce_device *qce, int little) +{ + u32 beats = (qce->burst_size >> 3) - 1; + u32 pipe_pair = qce->pipe_pair_id; + u32 config; + + config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK; + 
config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) | + BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT); + config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK; + config &= ~HIGH_SPD_EN_N_SHIFT; + + if (little) + config |= BIT(LITTLE_ENDIAN_MODE_SHIFT); + + return config; +} + +void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len) +{ + __be32 *d = dst; + const u8 *s = src; + unsigned int n; + + n = len / sizeof(u32); + for (; n > 0; n--) { + *d = cpu_to_be32p((const __u32 *) s); + s += sizeof(__u32); + d++; + } +} + +static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize) +{ + u8 swap[QCE_AES_IV_LENGTH]; + u32 i, j; + + if (ivsize > QCE_AES_IV_LENGTH) + return; + + memset(swap, 0, QCE_AES_IV_LENGTH); + + for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1; + i < QCE_AES_IV_LENGTH; i++, j--) + swap[i] = src[j]; + + qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH); +} + +static void qce_xtskey(struct qce_device *qce, const u8 *enckey, + unsigned int enckeylen, unsigned int cryptlen) +{ + u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0}; + unsigned int xtsklen = enckeylen / (2 * sizeof(u32)); + unsigned int xtsdusize; + + qce_cpu_to_be32p_array(xtskey, enckey + enckeylen / 2, enckeylen / 2); + qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen); + + /* xts du size 512B */ + xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen); + qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize); +} + +static void qce_setup_config(struct qce_device *qce) +{ + u32 config; + + /* get big endianness */ + config = qce_config_reg(qce, 0); + + /* clear status */ + qce_write(qce, REG_STATUS, 0); + qce_write(qce, REG_CONFIG, config); +} + +static inline void qce_crypto_go(struct qce_device *qce) +{ + qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT)); +} + +static int qce_setup_regs_ahash(struct crypto_async_request *async_req, + u32 totallen, u32 offset) +{ + struct ahash_request *req = ahash_request_cast(async_req); + struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm); + struct qce_sha_reqctx *rctx = ahash_request_ctx(req); + struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm); + struct qce_device *qce = tmpl->qce; + unsigned int digestsize = crypto_ahash_digestsize(ahash); + unsigned int blocksize = crypto_tfm_alg_blocksize(async_req->tfm); + __be32 auth[SHA256_DIGEST_SIZE / sizeof(__be32)] = {0}; + __be32 mackey[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0}; + u32 auth_cfg = 0, config; + unsigned int iv_words; + + /* if not the last, the size has to be on the block boundary */ + if (!rctx->last_blk && req->nbytes % blocksize) + return -EINVAL; + + qce_setup_config(qce); + + if (IS_CMAC(rctx->flags)) { + qce_write(qce, REG_AUTH_SEG_CFG, 0); + qce_write(qce, REG_ENCR_SEG_CFG, 0); + qce_write(qce, REG_ENCR_SEG_SIZE, 0); + qce_clear_array(qce, REG_AUTH_IV0, 16); + qce_clear_array(qce, REG_AUTH_KEY0, 16); + qce_clear_array(qce, REG_AUTH_BYTECNT0, 4); + + auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen); + } + + if (IS_SHA_HMAC(rctx->flags) || IS_CMAC(rctx->flags)) { + u32 authkey_words = rctx->authklen / sizeof(u32); + + qce_cpu_to_be32p_array(mackey, rctx->authkey, rctx->authklen); + qce_write_array(qce, REG_AUTH_KEY0, mackey, authkey_words); + } + + if (IS_CMAC(rctx->flags)) + goto go_proc; + + if (rctx->first_blk) + memcpy(auth, rctx->digest, digestsize); + else + qce_cpu_to_be32p_array(auth, rctx->digest, digestsize); + + iv_words = (IS_SHA1(rctx->flags) || IS_SHA1_HMAC(rctx->flags)) 
? 5 : 8; + qce_write_array(qce, REG_AUTH_IV0, auth, iv_words); + + if (rctx->first_blk) + qce_clear_array(qce, REG_AUTH_BYTECNT0, 4); + else + qce_write_array(qce, REG_AUTH_BYTECNT0, rctx->byte_count, 2); + + auth_cfg = qce_auth_cfg(rctx->flags, 0); + + if (rctx->last_blk) + auth_cfg |= BIT(AUTH_LAST_SHIFT); + else + auth_cfg &= ~BIT(AUTH_LAST_SHIFT); + + if (rctx->first_blk) + auth_cfg |= BIT(AUTH_FIRST_SHIFT); + else + auth_cfg &= ~BIT(AUTH_FIRST_SHIFT); + +go_proc: + qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg); + qce_write(qce, REG_AUTH_SEG_SIZE, req->nbytes); + qce_write(qce, REG_AUTH_SEG_START, 0); + qce_write(qce, REG_ENCR_SEG_CFG, 0); + qce_write(qce, REG_SEG_SIZE, req->nbytes); + + /* get little endianness */ + config = qce_config_reg(qce, 1); + qce_write(qce, REG_CONFIG, config); + + qce_crypto_go(qce); + + return 0; +} + +static int qce_setup_regs_ablkcipher(struct crypto_async_request *async_req, + u32 totallen, u32 offset) +{ + struct ablkcipher_request *req = ablkcipher_request_cast(async_req); + struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); + struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm); + struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm); + struct qce_device *qce = tmpl->qce; + __be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0}; + __be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0}; + unsigned int enckey_words, enciv_words; + unsigned int keylen; + u32 encr_cfg = 0, auth_cfg = 0, config; + unsigned int ivsize = rctx->ivsize; + unsigned long flags = rctx->flags; + + qce_setup_config(qce); + + if (IS_XTS(flags)) + keylen = ctx->enc_keylen / 2; + else + keylen = ctx->enc_keylen; + + qce_cpu_to_be32p_array(enckey, ctx->enc_key, keylen); + enckey_words = keylen / sizeof(u32); + + qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg); + + encr_cfg = qce_encr_cfg(flags, keylen); + + if (IS_DES(flags)) { + enciv_words = 2; + enckey_words = 2; + } else if (IS_3DES(flags)) { + enciv_words = 2; + enckey_words = 6; + } else if (IS_AES(flags)) { + if (IS_XTS(flags)) + qce_xtskey(qce, ctx->enc_key, ctx->enc_keylen, + rctx->cryptlen); + enciv_words = 4; + } else { + return -EINVAL; + } + + qce_write_array(qce, REG_ENCR_KEY0, enckey, enckey_words); + + if (!IS_ECB(flags)) { + if (IS_XTS(flags)) + qce_xts_swapiv(enciv, rctx->iv, ivsize); + else + qce_cpu_to_be32p_array(enciv, rctx->iv, ivsize); + + qce_write_array(qce, REG_CNTR0_IV0, enciv, enciv_words); + } + + if (IS_ENCRYPT(flags)) + encr_cfg |= BIT(ENCODE_SHIFT); + + qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg); + qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen); + qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff); + + if (IS_CTR(flags)) { + qce_write(qce, REG_CNTR_MASK, ~0); + qce_write(qce, REG_CNTR_MASK0, ~0); + qce_write(qce, REG_CNTR_MASK1, ~0); + qce_write(qce, REG_CNTR_MASK2, ~0); + } + + qce_write(qce, REG_SEG_SIZE, totallen); + + /* get little endianness */ + config = qce_config_reg(qce, 1); + qce_write(qce, REG_CONFIG, config); + + qce_crypto_go(qce); + + return 0; +} + +int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen, + u32 offset) +{ + switch (type) { + case CRYPTO_ALG_TYPE_ABLKCIPHER: + return qce_setup_regs_ablkcipher(async_req, totallen, offset); + case CRYPTO_ALG_TYPE_AHASH: + return qce_setup_regs_ahash(async_req, totallen, offset); + default: + return -EINVAL; + } +} + +#define STATUS_ERRORS \ + (BIT(SW_ERR_SHIFT) | BIT(AXI_ERR_SHIFT) | BIT(HSD_ERR_SHIFT)) + +int qce_check_status(struct qce_device *qce, u32 *status) +{ + int ret = 0; + + *status = 
qce_read(qce, REG_STATUS); + + /* + * Don't use result dump status. The operation may not be complete. + * Instead, use the status we just read from device. In case, we need to + * use result_status from result dump the result_status needs to be byte + * swapped, since we set the device to little endian. + */ + if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT))) + ret = -ENXIO; + + return ret; +} + +void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step) +{ + u32 val; + + val = qce_read(qce, REG_VERSION); + *major = (val & CORE_MAJOR_REV_MASK) >> CORE_MAJOR_REV_SHIFT; + *minor = (val & CORE_MINOR_REV_MASK) >> CORE_MINOR_REV_SHIFT; + *step = (val & CORE_STEP_REV_MASK) >> CORE_STEP_REV_SHIFT; +} diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h new file mode 100644 index 000000000000..411b1fc97216 --- /dev/null +++ b/drivers/crypto/qce/common.h @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _COMMON_H_ +#define _COMMON_H_ + +#include +#include +#include +#include + +/* key size in bytes */ +#define QCE_SHA_HMAC_KEY_SIZE 64 +#define QCE_MAX_CIPHER_KEY_SIZE AES_KEYSIZE_256 + +/* IV length in bytes */ +#define QCE_AES_IV_LENGTH AES_BLOCK_SIZE +/* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ +#define QCE_MAX_IV_SIZE AES_BLOCK_SIZE + +/* maximum nonce bytes */ +#define QCE_MAX_NONCE 16 +#define QCE_MAX_NONCE_WORDS (QCE_MAX_NONCE / sizeof(u32)) + +/* burst size alignment requirement */ +#define QCE_MAX_ALIGN_SIZE 64 + +/* cipher algorithms */ +#define QCE_ALG_DES BIT(0) +#define QCE_ALG_3DES BIT(1) +#define QCE_ALG_AES BIT(2) + +/* hash and hmac algorithms */ +#define QCE_HASH_SHA1 BIT(3) +#define QCE_HASH_SHA256 BIT(4) +#define QCE_HASH_SHA1_HMAC BIT(5) +#define QCE_HASH_SHA256_HMAC BIT(6) +#define QCE_HASH_AES_CMAC BIT(7) + +/* cipher modes */ +#define QCE_MODE_CBC BIT(8) +#define QCE_MODE_ECB BIT(9) +#define QCE_MODE_CTR BIT(10) +#define QCE_MODE_XTS BIT(11) +#define QCE_MODE_CCM BIT(12) +#define QCE_MODE_MASK GENMASK(12, 8) + +/* cipher encryption/decryption operations */ +#define QCE_ENCRYPT BIT(13) +#define QCE_DECRYPT BIT(14) + +#define IS_DES(flags) (flags & QCE_ALG_DES) +#define IS_3DES(flags) (flags & QCE_ALG_3DES) +#define IS_AES(flags) (flags & QCE_ALG_AES) + +#define IS_SHA1(flags) (flags & QCE_HASH_SHA1) +#define IS_SHA256(flags) (flags & QCE_HASH_SHA256) +#define IS_SHA1_HMAC(flags) (flags & QCE_HASH_SHA1_HMAC) +#define IS_SHA256_HMAC(flags) (flags & QCE_HASH_SHA256_HMAC) +#define IS_CMAC(flags) (flags & QCE_HASH_AES_CMAC) +#define IS_SHA(flags) (IS_SHA1(flags) || IS_SHA256(flags)) +#define IS_SHA_HMAC(flags) \ + (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags)) + +#define IS_CBC(mode) (mode & QCE_MODE_CBC) +#define IS_ECB(mode) (mode & QCE_MODE_ECB) +#define IS_CTR(mode) (mode & QCE_MODE_CTR) +#define IS_XTS(mode) (mode & QCE_MODE_XTS) +#define IS_CCM(mode) (mode & QCE_MODE_CCM) + +#define IS_ENCRYPT(dir) (dir & QCE_ENCRYPT) +#define IS_DECRYPT(dir) (dir & QCE_DECRYPT) + +struct qce_alg_template { + 
struct list_head entry; + u32 crypto_alg_type; + unsigned long alg_flags; + const __be32 *std_iv; + union { + struct crypto_alg crypto; + struct ahash_alg ahash; + } alg; + struct qce_device *qce; +}; + +void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len); +int qce_check_status(struct qce_device *qce, u32 *status); +void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step); +int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen, + u32 offset); + +#endif /* _COMMON_H_ */ diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c new file mode 100644 index 000000000000..33ae3545dc48 --- /dev/null +++ b/drivers/crypto/qce/core.c @@ -0,0 +1,286 @@ +/* + * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "core.h" +#include "cipher.h" +#include "sha.h" + +#define QCE_MAJOR_VERSION5 0x05 +#define QCE_QUEUE_LENGTH 1 + +static const struct qce_algo_ops *qce_ops[] = { + &ablkcipher_ops, + &ahash_ops, +}; + +static void qce_unregister_algs(struct qce_device *qce) +{ + const struct qce_algo_ops *ops; + int i; + + for (i = 0; i < ARRAY_SIZE(qce_ops); i++) { + ops = qce_ops[i]; + ops->unregister_algs(qce); + } +} + +static int qce_register_algs(struct qce_device *qce) +{ + const struct qce_algo_ops *ops; + int i, ret = -ENODEV; + + for (i = 0; i < ARRAY_SIZE(qce_ops); i++) { + ops = qce_ops[i]; + ret = ops->register_algs(qce); + if (ret) + break; + } + + return ret; +} + +static int qce_handle_request(struct crypto_async_request *async_req) +{ + int ret = -EINVAL, i; + const struct qce_algo_ops *ops; + u32 type = crypto_tfm_alg_type(async_req->tfm); + + for (i = 0; i < ARRAY_SIZE(qce_ops); i++) { + ops = qce_ops[i]; + if (type != ops->type) + continue; + ret = ops->async_req_handle(async_req); + break; + } + + return ret; +} + +static int qce_handle_queue(struct qce_device *qce, + struct crypto_async_request *req) +{ + struct crypto_async_request *async_req, *backlog; + unsigned long flags; + int ret = 0, err; + + spin_lock_irqsave(&qce->lock, flags); + + if (req) + ret = crypto_enqueue_request(&qce->queue, req); + + /* busy, do not dequeue request */ + if (qce->req) { + spin_unlock_irqrestore(&qce->lock, flags); + return ret; + } + + backlog = crypto_get_backlog(&qce->queue); + async_req = crypto_dequeue_request(&qce->queue); + if (async_req) + qce->req = async_req; + + spin_unlock_irqrestore(&qce->lock, flags); + + if (!async_req) + return ret; + + if (backlog) { + spin_lock_bh(&qce->lock); + backlog->complete(backlog, -EINPROGRESS); + spin_unlock_bh(&qce->lock); + } + + err = qce_handle_request(async_req); + if (err) { + qce->result = err; + tasklet_schedule(&qce->done_tasklet); + } + + return ret; +} + +static void qce_tasklet_req_done(unsigned long data) +{ + struct qce_device *qce = (struct qce_device *)data; + struct crypto_async_request *req; + unsigned long flags; + + spin_lock_irqsave(&qce->lock, flags); + req = qce->req; + qce->req = 
NULL; + spin_unlock_irqrestore(&qce->lock, flags); + + if (req) + req->complete(req, qce->result); + + qce_handle_queue(qce, NULL); +} + +static int qce_async_request_enqueue(struct qce_device *qce, + struct crypto_async_request *req) +{ + return qce_handle_queue(qce, req); +} + +static void qce_async_request_done(struct qce_device *qce, int ret) +{ + qce->result = ret; + tasklet_schedule(&qce->done_tasklet); +} + +static int qce_check_version(struct qce_device *qce) +{ + u32 major, minor, step; + + qce_get_version(qce, &major, &minor, &step); + + /* + * the driver does not support v5 with minor 0 because it has special + * alignment requirements. + */ + if (major != QCE_MAJOR_VERSION5 || minor == 0) + return -ENODEV; + + qce->burst_size = QCE_BAM_BURST_SIZE; + qce->pipe_pair_id = 1; + + dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n", + major, minor, step); + + return 0; +} + +static int qce_crypto_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct qce_device *qce; + struct resource *res; + int ret; + + qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL); + if (!qce) + return -ENOMEM; + + qce->dev = dev; + platform_set_drvdata(pdev, qce); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + qce->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(qce->base)) + return PTR_ERR(qce->base); + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret < 0) + return ret; + + qce->core = devm_clk_get(qce->dev, "core"); + if (IS_ERR(qce->core)) + return PTR_ERR(qce->core); + + qce->iface = devm_clk_get(qce->dev, "iface"); + if (IS_ERR(qce->iface)) + return PTR_ERR(qce->iface); + + qce->bus = devm_clk_get(qce->dev, "bus"); + if (IS_ERR(qce->bus)) + return PTR_ERR(qce->bus); + + ret = clk_prepare_enable(qce->core); + if (ret) + return ret; + + ret = clk_prepare_enable(qce->iface); + if (ret) + goto err_clks_core; + + ret = clk_prepare_enable(qce->bus); + if (ret) + goto err_clks_iface; + + ret = qce_dma_request(qce->dev, &qce->dma); + if (ret) + goto err_clks; + + ret = qce_check_version(qce); + if (ret) + goto err_clks; + + spin_lock_init(&qce->lock); + tasklet_init(&qce->done_tasklet, qce_tasklet_req_done, + (unsigned long)qce); + crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH); + + qce->async_req_enqueue = qce_async_request_enqueue; + qce->async_req_done = qce_async_request_done; + + ret = qce_register_algs(qce); + if (ret) + goto err_dma; + + return 0; + +err_dma: + qce_dma_release(&qce->dma); +err_clks: + clk_disable_unprepare(qce->bus); +err_clks_iface: + clk_disable_unprepare(qce->iface); +err_clks_core: + clk_disable_unprepare(qce->core); + return ret; +} + +static int qce_crypto_remove(struct platform_device *pdev) +{ + struct qce_device *qce = platform_get_drvdata(pdev); + + tasklet_kill(&qce->done_tasklet); + qce_unregister_algs(qce); + qce_dma_release(&qce->dma); + clk_disable_unprepare(qce->bus); + clk_disable_unprepare(qce->iface); + clk_disable_unprepare(qce->core); + return 0; +} + +static const struct of_device_id qce_crypto_of_match[] = { + { .compatible = "qcom,crypto-v5.1", }, + {} +}; +MODULE_DEVICE_TABLE(of, qce_crypto_of_match); + +static struct platform_driver qce_crypto_driver = { + .probe = qce_crypto_probe, + .remove = qce_crypto_remove, + .driver = { + .owner = THIS_MODULE, + .name = KBUILD_MODNAME, + .of_match_table = qce_crypto_of_match, + }, +}; +module_platform_driver(qce_crypto_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Qualcomm crypto engine driver"); +MODULE_ALIAS("platform:" 
KBUILD_MODNAME); +MODULE_AUTHOR("The Linux Foundation"); diff --git a/drivers/crypto/qce/core.h b/drivers/crypto/qce/core.h new file mode 100644 index 000000000000..549965d4d91f --- /dev/null +++ b/drivers/crypto/qce/core.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CORE_H_ +#define _CORE_H_ + +#include "dma.h" + +/** + * struct qce_device - crypto engine device structure + * @queue: crypto request queue + * @lock: the lock protects queue and req + * @done_tasklet: done tasklet object + * @req: current active request + * @result: result of current transform + * @base: virtual IO base + * @dev: pointer to device structure + * @core: core device clock + * @iface: interface clock + * @bus: bus clock + * @dma: pointer to dma data + * @burst_size: the crypto burst size + * @pipe_pair_id: which pipe pair id the device using + * @async_req_enqueue: invoked by every algorithm to enqueue a request + * @async_req_done: invoked by every algorithm to finish its request + */ +struct qce_device { + struct crypto_queue queue; + spinlock_t lock; + struct tasklet_struct done_tasklet; + struct crypto_async_request *req; + int result; + void __iomem *base; + struct device *dev; + struct clk *core, *iface, *bus; + struct qce_dma_data dma; + int burst_size; + unsigned int pipe_pair_id; + int (*async_req_enqueue)(struct qce_device *qce, + struct crypto_async_request *req); + void (*async_req_done)(struct qce_device *qce, int ret); +}; + +/** + * struct qce_algo_ops - algorithm operations per crypto type + * @type: should be CRYPTO_ALG_TYPE_XXX + * @register_algs: invoked by core to register the algorithms + * @unregister_algs: invoked by core to unregister the algorithms + * @async_req_handle: invoked by core to handle enqueued request + */ +struct qce_algo_ops { + u32 type; + int (*register_algs)(struct qce_device *qce); + void (*unregister_algs)(struct qce_device *qce); + int (*async_req_handle)(struct crypto_async_request *async_req); +}; + +#endif /* _CORE_H_ */ diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c new file mode 100644 index 000000000000..0fb21e13f247 --- /dev/null +++ b/drivers/crypto/qce/dma.c @@ -0,0 +1,186 @@ +/* + * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include + +#include "dma.h" + +int qce_dma_request(struct device *dev, struct qce_dma_data *dma) +{ + int ret; + + dma->txchan = dma_request_slave_channel_reason(dev, "tx"); + if (IS_ERR(dma->txchan)) + return PTR_ERR(dma->txchan); + + dma->rxchan = dma_request_slave_channel_reason(dev, "rx"); + if (IS_ERR(dma->rxchan)) { + ret = PTR_ERR(dma->rxchan); + goto error_rx; + } + + dma->result_buf = kmalloc(QCE_RESULT_BUF_SZ + QCE_IGNORE_BUF_SZ, + GFP_KERNEL); + if (!dma->result_buf) { + ret = -ENOMEM; + goto error_nomem; + } + + dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ; + + return 0; +error_nomem: + dma_release_channel(dma->rxchan); +error_rx: + dma_release_channel(dma->txchan); + return ret; +} + +void qce_dma_release(struct qce_dma_data *dma) +{ + dma_release_channel(dma->txchan); + dma_release_channel(dma->rxchan); + kfree(dma->result_buf); +} + +int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, bool chained) +{ + int err; + + if (chained) { + while (sg) { + err = dma_map_sg(dev, sg, 1, dir); + if (!err) + return -EFAULT; + sg = scatterwalk_sg_next(sg); + } + } else { + err = dma_map_sg(dev, sg, nents, dir); + if (!err) + return -EFAULT; + } + + return nents; +} + +void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, bool chained) +{ + if (chained) + while (sg) { + dma_unmap_sg(dev, sg, 1, dir); + sg = scatterwalk_sg_next(sg); + } + else + dma_unmap_sg(dev, sg, nents, dir); +} + +int qce_countsg(struct scatterlist *sglist, int nbytes, bool *chained) +{ + struct scatterlist *sg = sglist; + int nents = 0; + + if (chained) + *chained = false; + + while (nbytes > 0 && sg) { + nents++; + nbytes -= sg->length; + if (!sg_is_last(sg) && (sg + 1)->length == 0 && chained) + *chained = true; + sg = scatterwalk_sg_next(sg); + } + + return nents; +} + +struct scatterlist * +qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl) +{ + struct scatterlist *sg = sgt->sgl, *sg_last = NULL; + + while (sg) { + if (!sg_page(sg)) + break; + sg = sg_next(sg); + } + + if (!sg) + return ERR_PTR(-EINVAL); + + while (new_sgl && sg) { + sg_set_page(sg, sg_page(new_sgl), new_sgl->length, + new_sgl->offset); + sg_last = sg; + sg = sg_next(sg); + new_sgl = sg_next(new_sgl); + } + + return sg_last; +} + +static int qce_dma_prep_sg(struct dma_chan *chan, struct scatterlist *sg, + int nents, unsigned long flags, + enum dma_transfer_direction dir, + dma_async_tx_callback cb, void *cb_param) +{ + struct dma_async_tx_descriptor *desc; + dma_cookie_t cookie; + + if (!sg || !nents) + return -EINVAL; + + desc = dmaengine_prep_slave_sg(chan, sg, nents, dir, flags); + if (!desc) + return -EINVAL; + + desc->callback = cb; + desc->callback_param = cb_param; + cookie = dmaengine_submit(desc); + + return dma_submit_error(cookie); +} + +int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *rx_sg, + int rx_nents, struct scatterlist *tx_sg, int tx_nents, + dma_async_tx_callback cb, void *cb_param) +{ + struct dma_chan *rxchan = dma->rxchan; + struct dma_chan *txchan = dma->txchan; + unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK; + int ret; + + ret = qce_dma_prep_sg(rxchan, rx_sg, rx_nents, flags, DMA_MEM_TO_DEV, + NULL, NULL); + if (ret) + return ret; + + return qce_dma_prep_sg(txchan, tx_sg, tx_nents, flags, DMA_DEV_TO_MEM, + cb, cb_param); +} + +void qce_dma_issue_pending(struct qce_dma_data *dma) +{ + dma_async_issue_pending(dma->rxchan); + 
dma_async_issue_pending(dma->txchan); +} + +int qce_dma_terminate_all(struct qce_dma_data *dma) +{ + int ret; + + ret = dmaengine_terminate_all(dma->rxchan); + return ret ?: dmaengine_terminate_all(dma->txchan); +} diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h new file mode 100644 index 000000000000..805e378d59e9 --- /dev/null +++ b/drivers/crypto/qce/dma.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DMA_H_ +#define _DMA_H_ + +/* maximum data transfer block size between BAM and CE */ +#define QCE_BAM_BURST_SIZE 64 + +#define QCE_AUTHIV_REGS_CNT 16 +#define QCE_AUTH_BYTECOUNT_REGS_CNT 4 +#define QCE_CNTRIV_REGS_CNT 4 + +struct qce_result_dump { + u32 auth_iv[QCE_AUTHIV_REGS_CNT]; + u32 auth_byte_count[QCE_AUTH_BYTECOUNT_REGS_CNT]; + u32 encr_cntr_iv[QCE_CNTRIV_REGS_CNT]; + u32 status; + u32 status2; +}; + +#define QCE_IGNORE_BUF_SZ (2 * QCE_BAM_BURST_SIZE) +#define QCE_RESULT_BUF_SZ \ + ALIGN(sizeof(struct qce_result_dump), QCE_BAM_BURST_SIZE) + +struct qce_dma_data { + struct dma_chan *txchan; + struct dma_chan *rxchan; + struct qce_result_dump *result_buf; + void *ignore_buf; +}; + +int qce_dma_request(struct device *dev, struct qce_dma_data *dma); +void qce_dma_release(struct qce_dma_data *dma); +int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in, + int in_ents, struct scatterlist *sg_out, int out_ents, + dma_async_tx_callback cb, void *cb_param); +void qce_dma_issue_pending(struct qce_dma_data *dma); +int qce_dma_terminate_all(struct qce_dma_data *dma); +int qce_countsg(struct scatterlist *sg_list, int nbytes, bool *chained); +void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, bool chained); +int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, bool chained); +struct scatterlist * +qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add); + +#endif /* _DMA_H_ */ diff --git a/drivers/crypto/qce/regs-v5.h b/drivers/crypto/qce/regs-v5.h new file mode 100644 index 000000000000..f0e19e35664a --- /dev/null +++ b/drivers/crypto/qce/regs-v5.h @@ -0,0 +1,334 @@ +/* + * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _REGS_V5_H_ +#define _REGS_V5_H_ + +#include + +#define REG_VERSION 0x000 +#define REG_STATUS 0x100 +#define REG_STATUS2 0x104 +#define REG_ENGINES_AVAIL 0x108 +#define REG_FIFO_SIZES 0x10c +#define REG_SEG_SIZE 0x110 +#define REG_GOPROC 0x120 +#define REG_ENCR_SEG_CFG 0x200 +#define REG_ENCR_SEG_SIZE 0x204 +#define REG_ENCR_SEG_START 0x208 +#define REG_CNTR0_IV0 0x20c +#define REG_CNTR1_IV1 0x210 +#define REG_CNTR2_IV2 0x214 +#define REG_CNTR3_IV3 0x218 +#define REG_CNTR_MASK 0x21C +#define REG_ENCR_CCM_INT_CNTR0 0x220 +#define REG_ENCR_CCM_INT_CNTR1 0x224 +#define REG_ENCR_CCM_INT_CNTR2 0x228 +#define REG_ENCR_CCM_INT_CNTR3 0x22c +#define REG_ENCR_XTS_DU_SIZE 0x230 +#define REG_CNTR_MASK2 0x234 +#define REG_CNTR_MASK1 0x238 +#define REG_CNTR_MASK0 0x23c +#define REG_AUTH_SEG_CFG 0x300 +#define REG_AUTH_SEG_SIZE 0x304 +#define REG_AUTH_SEG_START 0x308 +#define REG_AUTH_IV0 0x310 +#define REG_AUTH_IV1 0x314 +#define REG_AUTH_IV2 0x318 +#define REG_AUTH_IV3 0x31c +#define REG_AUTH_IV4 0x320 +#define REG_AUTH_IV5 0x324 +#define REG_AUTH_IV6 0x328 +#define REG_AUTH_IV7 0x32c +#define REG_AUTH_IV8 0x330 +#define REG_AUTH_IV9 0x334 +#define REG_AUTH_IV10 0x338 +#define REG_AUTH_IV11 0x33c +#define REG_AUTH_IV12 0x340 +#define REG_AUTH_IV13 0x344 +#define REG_AUTH_IV14 0x348 +#define REG_AUTH_IV15 0x34c +#define REG_AUTH_INFO_NONCE0 0x350 +#define REG_AUTH_INFO_NONCE1 0x354 +#define REG_AUTH_INFO_NONCE2 0x358 +#define REG_AUTH_INFO_NONCE3 0x35c +#define REG_AUTH_BYTECNT0 0x390 +#define REG_AUTH_BYTECNT1 0x394 +#define REG_AUTH_BYTECNT2 0x398 +#define REG_AUTH_BYTECNT3 0x39c +#define REG_AUTH_EXP_MAC0 0x3a0 +#define REG_AUTH_EXP_MAC1 0x3a4 +#define REG_AUTH_EXP_MAC2 0x3a8 +#define REG_AUTH_EXP_MAC3 0x3ac +#define REG_AUTH_EXP_MAC4 0x3b0 +#define REG_AUTH_EXP_MAC5 0x3b4 +#define REG_AUTH_EXP_MAC6 0x3b8 +#define REG_AUTH_EXP_MAC7 0x3bc +#define REG_CONFIG 0x400 +#define REG_GOPROC_QC_KEY 0x1000 +#define REG_GOPROC_OEM_KEY 0x2000 +#define REG_ENCR_KEY0 0x3000 +#define REG_ENCR_KEY1 0x3004 +#define REG_ENCR_KEY2 0x3008 +#define REG_ENCR_KEY3 0x300c +#define REG_ENCR_KEY4 0x3010 +#define REG_ENCR_KEY5 0x3014 +#define REG_ENCR_KEY6 0x3018 +#define REG_ENCR_KEY7 0x301c +#define REG_ENCR_XTS_KEY0 0x3020 +#define REG_ENCR_XTS_KEY1 0x3024 +#define REG_ENCR_XTS_KEY2 0x3028 +#define REG_ENCR_XTS_KEY3 0x302c +#define REG_ENCR_XTS_KEY4 0x3030 +#define REG_ENCR_XTS_KEY5 0x3034 +#define REG_ENCR_XTS_KEY6 0x3038 +#define REG_ENCR_XTS_KEY7 0x303c +#define REG_AUTH_KEY0 0x3040 +#define REG_AUTH_KEY1 0x3044 +#define REG_AUTH_KEY2 0x3048 +#define REG_AUTH_KEY3 0x304c +#define REG_AUTH_KEY4 0x3050 +#define REG_AUTH_KEY5 0x3054 +#define REG_AUTH_KEY6 0x3058 +#define REG_AUTH_KEY7 0x305c +#define REG_AUTH_KEY8 0x3060 +#define REG_AUTH_KEY9 0x3064 +#define REG_AUTH_KEY10 0x3068 +#define REG_AUTH_KEY11 0x306c +#define REG_AUTH_KEY12 0x3070 +#define REG_AUTH_KEY13 0x3074 +#define REG_AUTH_KEY14 0x3078 +#define REG_AUTH_KEY15 0x307c + +/* Register bits - REG_VERSION */ +#define CORE_STEP_REV_SHIFT 0 +#define CORE_STEP_REV_MASK GENMASK(15, 0) +#define CORE_MINOR_REV_SHIFT 16 +#define CORE_MINOR_REV_MASK GENMASK(23, 16) +#define CORE_MAJOR_REV_SHIFT 24 +#define CORE_MAJOR_REV_MASK GENMASK(31, 24) + +/* Register bits - REG_STATUS */ +#define MAC_FAILED_SHIFT 31 +#define DOUT_SIZE_AVAIL_SHIFT 26 +#define DOUT_SIZE_AVAIL_MASK GENMASK(30, 26) +#define DIN_SIZE_AVAIL_SHIFT 21 +#define DIN_SIZE_AVAIL_MASK GENMASK(25, 21) +#define HSD_ERR_SHIFT 20 +#define ACCESS_VIOL_SHIFT 19 +#define PIPE_ACTIVE_ERR_SHIFT 18 
+#define CFG_CHNG_ERR_SHIFT 17 +#define DOUT_ERR_SHIFT 16 +#define DIN_ERR_SHIFT 15 +#define AXI_ERR_SHIFT 14 +#define CRYPTO_STATE_SHIFT 10 +#define CRYPTO_STATE_MASK GENMASK(13, 10) +#define ENCR_BUSY_SHIFT 9 +#define AUTH_BUSY_SHIFT 8 +#define DOUT_INTR_SHIFT 7 +#define DIN_INTR_SHIFT 6 +#define OP_DONE_INTR_SHIFT 5 +#define ERR_INTR_SHIFT 4 +#define DOUT_RDY_SHIFT 3 +#define DIN_RDY_SHIFT 2 +#define OPERATION_DONE_SHIFT 1 +#define SW_ERR_SHIFT 0 + +/* Register bits - REG_STATUS2 */ +#define AXI_EXTRA_SHIFT 1 +#define LOCKED_SHIFT 2 + +/* Register bits - REG_CONFIG */ +#define REQ_SIZE_SHIFT 17 +#define REQ_SIZE_MASK GENMASK(20, 17) +#define REQ_SIZE_ENUM_1_BEAT 0 +#define REQ_SIZE_ENUM_2_BEAT 1 +#define REQ_SIZE_ENUM_3_BEAT 2 +#define REQ_SIZE_ENUM_4_BEAT 3 +#define REQ_SIZE_ENUM_5_BEAT 4 +#define REQ_SIZE_ENUM_6_BEAT 5 +#define REQ_SIZE_ENUM_7_BEAT 6 +#define REQ_SIZE_ENUM_8_BEAT 7 +#define REQ_SIZE_ENUM_9_BEAT 8 +#define REQ_SIZE_ENUM_10_BEAT 9 +#define REQ_SIZE_ENUM_11_BEAT 10 +#define REQ_SIZE_ENUM_12_BEAT 11 +#define REQ_SIZE_ENUM_13_BEAT 12 +#define REQ_SIZE_ENUM_14_BEAT 13 +#define REQ_SIZE_ENUM_15_BEAT 14 +#define REQ_SIZE_ENUM_16_BEAT 15 + +#define MAX_QUEUED_REQ_SHIFT 14 +#define MAX_QUEUED_REQ_MASK GENMASK(24, 16) +#define ENUM_1_QUEUED_REQS 0 +#define ENUM_2_QUEUED_REQS 1 +#define ENUM_3_QUEUED_REQS 2 + +#define IRQ_ENABLES_SHIFT 10 +#define IRQ_ENABLES_MASK GENMASK(13, 10) + +#define LITTLE_ENDIAN_MODE_SHIFT 9 +#define PIPE_SET_SELECT_SHIFT 5 +#define PIPE_SET_SELECT_MASK GENMASK(8, 5) + +#define HIGH_SPD_EN_N_SHIFT 4 +#define MASK_DOUT_INTR_SHIFT 3 +#define MASK_DIN_INTR_SHIFT 2 +#define MASK_OP_DONE_INTR_SHIFT 1 +#define MASK_ERR_INTR_SHIFT 0 + +/* Register bits - REG_AUTH_SEG_CFG */ +#define COMP_EXP_MAC_SHIFT 24 +#define COMP_EXP_MAC_DISABLED 0 +#define COMP_EXP_MAC_ENABLED 1 + +#define F9_DIRECTION_SHIFT 23 +#define F9_DIRECTION_UPLINK 0 +#define F9_DIRECTION_DOWNLINK 1 + +#define AUTH_NONCE_NUM_WORDS_SHIFT 20 +#define AUTH_NONCE_NUM_WORDS_MASK GENMASK(22, 20) + +#define USE_PIPE_KEY_AUTH_SHIFT 19 +#define USE_HW_KEY_AUTH_SHIFT 18 +#define AUTH_FIRST_SHIFT 17 +#define AUTH_LAST_SHIFT 16 + +#define AUTH_POS_SHIFT 14 +#define AUTH_POS_MASK GENMASK(15, 14) +#define AUTH_POS_BEFORE 0 +#define AUTH_POS_AFTER 1 + +#define AUTH_SIZE_SHIFT 9 +#define AUTH_SIZE_MASK GENMASK(13, 9) +#define AUTH_SIZE_SHA1 0 +#define AUTH_SIZE_SHA256 1 +#define AUTH_SIZE_ENUM_1_BYTES 0 +#define AUTH_SIZE_ENUM_2_BYTES 1 +#define AUTH_SIZE_ENUM_3_BYTES 2 +#define AUTH_SIZE_ENUM_4_BYTES 3 +#define AUTH_SIZE_ENUM_5_BYTES 4 +#define AUTH_SIZE_ENUM_6_BYTES 5 +#define AUTH_SIZE_ENUM_7_BYTES 6 +#define AUTH_SIZE_ENUM_8_BYTES 7 +#define AUTH_SIZE_ENUM_9_BYTES 8 +#define AUTH_SIZE_ENUM_10_BYTES 9 +#define AUTH_SIZE_ENUM_11_BYTES 10 +#define AUTH_SIZE_ENUM_12_BYTES 11 +#define AUTH_SIZE_ENUM_13_BYTES 12 +#define AUTH_SIZE_ENUM_14_BYTES 13 +#define AUTH_SIZE_ENUM_15_BYTES 14 +#define AUTH_SIZE_ENUM_16_BYTES 15 + +#define AUTH_MODE_SHIFT 6 +#define AUTH_MODE_MASK GENMASK(8, 6) +#define AUTH_MODE_HASH 0 +#define AUTH_MODE_HMAC 1 +#define AUTH_MODE_CCM 0 +#define AUTH_MODE_CMAC 1 + +#define AUTH_KEY_SIZE_SHIFT 3 +#define AUTH_KEY_SIZE_MASK GENMASK(5, 3) +#define AUTH_KEY_SZ_AES128 0 +#define AUTH_KEY_SZ_AES256 2 + +#define AUTH_ALG_SHIFT 0 +#define AUTH_ALG_MASK GENMASK(2, 0) +#define AUTH_ALG_NONE 0 +#define AUTH_ALG_SHA 1 +#define AUTH_ALG_AES 2 +#define AUTH_ALG_KASUMI 3 +#define AUTH_ALG_SNOW3G 4 +#define AUTH_ALG_ZUC 5 + +/* Register bits - REG_ENCR_XTS_DU_SIZE */ +#define ENCR_XTS_DU_SIZE_SHIFT 0 
+#define ENCR_XTS_DU_SIZE_MASK GENMASK(19, 0) + +/* Register bits - REG_ENCR_SEG_CFG */ +#define F8_KEYSTREAM_ENABLE_SHIFT 17 +#define F8_KEYSTREAM_DISABLED 0 +#define F8_KEYSTREAM_ENABLED 1 + +#define F8_DIRECTION_SHIFT 16 +#define F8_DIRECTION_UPLINK 0 +#define F8_DIRECTION_DOWNLINK 1 + +#define USE_PIPE_KEY_ENCR_SHIFT 15 +#define USE_PIPE_KEY_ENCR_ENABLED 1 +#define USE_KEY_REGISTERS 0 + +#define USE_HW_KEY_ENCR_SHIFT 14 +#define USE_KEY_REG 0 +#define USE_HW_KEY 1 + +#define LAST_CCM_SHIFT 13 +#define LAST_CCM_XFR 1 +#define INTERM_CCM_XFR 0 + +#define CNTR_ALG_SHIFT 11 +#define CNTR_ALG_MASK GENMASK(12, 11) +#define CNTR_ALG_NIST 0 + +#define ENCODE_SHIFT 10 + +#define ENCR_MODE_SHIFT 6 +#define ENCR_MODE_MASK GENMASK(9, 6) +#define ENCR_MODE_ECB 0 +#define ENCR_MODE_CBC 1 +#define ENCR_MODE_CTR 2 +#define ENCR_MODE_XTS 3 +#define ENCR_MODE_CCM 4 + +#define ENCR_KEY_SZ_SHIFT 3 +#define ENCR_KEY_SZ_MASK GENMASK(5, 3) +#define ENCR_KEY_SZ_DES 0 +#define ENCR_KEY_SZ_3DES 1 +#define ENCR_KEY_SZ_AES128 0 +#define ENCR_KEY_SZ_AES256 2 + +#define ENCR_ALG_SHIFT 0 +#define ENCR_ALG_MASK GENMASK(2, 0) +#define ENCR_ALG_NONE 0 +#define ENCR_ALG_DES 1 +#define ENCR_ALG_AES 2 +#define ENCR_ALG_KASUMI 4 +#define ENCR_ALG_SNOW_3G 5 +#define ENCR_ALG_ZUC 6 + +/* Register bits - REG_GOPROC */ +#define GO_SHIFT 0 +#define CLR_CNTXT_SHIFT 1 +#define RESULTS_DUMP_SHIFT 2 + +/* Register bits - REG_ENGINES_AVAIL */ +#define ENCR_AES_SEL_SHIFT 0 +#define DES_SEL_SHIFT 1 +#define ENCR_SNOW3G_SEL_SHIFT 2 +#define ENCR_KASUMI_SEL_SHIFT 3 +#define SHA_SEL_SHIFT 4 +#define SHA512_SEL_SHIFT 5 +#define AUTH_AES_SEL_SHIFT 6 +#define AUTH_SNOW3G_SEL_SHIFT 7 +#define AUTH_KASUMI_SEL_SHIFT 8 +#define BAM_PIPE_SETS_SHIFT 9 +#define BAM_PIPE_SETS_MASK GENMASK(12, 9) +#define AXI_WR_BEATS_SHIFT 13 +#define AXI_WR_BEATS_MASK GENMASK(18, 13) +#define AXI_RD_BEATS_SHIFT 19 +#define AXI_RD_BEATS_MASK GENMASK(24, 19) +#define ENCR_ZUC_SEL_SHIFT 26 +#define AUTH_ZUC_SEL_SHIFT 27 +#define ZUC_ENABLE_SHIFT 28 + +#endif /* _REGS_V5_H_ */ diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c new file mode 100644 index 000000000000..3c33ac9c8cba --- /dev/null +++ b/drivers/crypto/qce/sha.c @@ -0,0 +1,588 @@ +/* + * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include + +#include "common.h" +#include "core.h" +#include "sha.h" + +/* crypto hw padding constant for first operation */ +#define SHA_PADDING 64 +#define SHA_PADDING_MASK (SHA_PADDING - 1) + +static LIST_HEAD(ahash_algs); + +static const __be32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(__be32)] = { + SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0 +}; + +static const __be32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(__be32)] = { + SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, + SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 +}; + +static void qce_ahash_done(void *data) +{ + struct crypto_async_request *async_req = data; + struct ahash_request *req = ahash_request_cast(async_req); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct qce_sha_reqctx *rctx = ahash_request_ctx(req); + struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm); + struct qce_device *qce = tmpl->qce; + struct qce_result_dump *result = qce->dma.result_buf; + unsigned int digestsize = crypto_ahash_digestsize(ahash); + int error; + u32 status; + + error = qce_dma_terminate_all(&qce->dma); + if (error) + dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error); + + qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE, + rctx->src_chained); + qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0); + + memcpy(rctx->digest, result->auth_iv, digestsize); + if (req->result) + memcpy(req->result, result->auth_iv, digestsize); + + rctx->byte_count[0] = cpu_to_be32(result->auth_byte_count[0]); + rctx->byte_count[1] = cpu_to_be32(result->auth_byte_count[1]); + + error = qce_check_status(qce, &status); + if (error < 0) + dev_dbg(qce->dev, "ahash operation error (%x)\n", status); + + req->src = rctx->src_orig; + req->nbytes = rctx->nbytes_orig; + rctx->last_blk = false; + rctx->first_blk = false; + + qce->async_req_done(tmpl->qce, error); +} + +static int qce_ahash_async_req_handle(struct crypto_async_request *async_req) +{ + struct ahash_request *req = ahash_request_cast(async_req); + struct qce_sha_reqctx *rctx = ahash_request_ctx(req); + struct qce_sha_ctx *ctx = crypto_tfm_ctx(async_req->tfm); + struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm); + struct qce_device *qce = tmpl->qce; + unsigned long flags = rctx->flags; + int ret; + + if (IS_SHA_HMAC(flags)) { + rctx->authkey = ctx->authkey; + rctx->authklen = QCE_SHA_HMAC_KEY_SIZE; + } else if (IS_CMAC(flags)) { + rctx->authkey = ctx->authkey; + rctx->authklen = AES_KEYSIZE_128; + } + + rctx->src_nents = qce_countsg(req->src, req->nbytes, + &rctx->src_chained); + ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE, + rctx->src_chained); + if (ret < 0) + return ret; + + sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ); + + ret = qce_mapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0); + if (ret < 0) + goto error_unmap_src; + + ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents, + &rctx->result_sg, 1, qce_ahash_done, async_req); + if (ret) + goto error_unmap_dst; + + qce_dma_issue_pending(&qce->dma); + + ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0); + if (ret) + goto error_terminate; + + return 0; + +error_terminate: + qce_dma_terminate_all(&qce->dma); +error_unmap_dst: + qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0); +error_unmap_src: + qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE, + rctx->src_chained); + return ret; +} + +static int qce_ahash_init(struct ahash_request *req) +{ + struct qce_sha_reqctx 
*rctx = ahash_request_ctx(req); + struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); + const __be32 *std_iv = tmpl->std_iv; + + memset(rctx, 0, sizeof(*rctx)); + rctx->first_blk = true; + rctx->last_blk = false; + rctx->flags = tmpl->alg_flags; + memcpy(rctx->digest, std_iv, sizeof(rctx->digest)); + + return 0; +} + +static int qce_ahash_export(struct ahash_request *req, void *out) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct qce_sha_reqctx *rctx = ahash_request_ctx(req); + unsigned long flags = rctx->flags; + unsigned int digestsize = crypto_ahash_digestsize(ahash); + unsigned int blocksize = + crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash)); + + if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) { + struct sha1_state *out_state = out; + + out_state->count = rctx->count; + qce_cpu_to_be32p_array(out_state->state, rctx->digest, + digestsize); + memcpy(out_state->buffer, rctx->buf, blocksize); + } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) { + struct sha256_state *out_state = out; + + out_state->count = rctx->count; + qce_cpu_to_be32p_array(out_state->state, rctx->digest, + digestsize); + memcpy(out_state->buf, rctx->buf, blocksize); + } else { + return -EINVAL; + } + + return 0; +} + +static int qce_import_common(struct ahash_request *req, u64 in_count, + const u32 *state, const u8 *buffer, bool hmac) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct qce_sha_reqctx *rctx = ahash_request_ctx(req); + unsigned int digestsize = crypto_ahash_digestsize(ahash); + unsigned int blocksize; + u64 count = in_count; + + blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash)); + rctx->count = in_count; + memcpy(rctx->buf, buffer, blocksize); + + if (in_count <= blocksize) { + rctx->first_blk = 1; + } else { + rctx->first_blk = 0; + /* + * For HMAC, there is a hardware padding done when first block + * is set. Therefore the byte_count must be incremened by 64 + * after the first block operation. 
+ */ + if (hmac) + count += SHA_PADDING; + } + + rctx->byte_count[0] = (__be32)(count & ~SHA_PADDING_MASK); + rctx->byte_count[1] = (__be32)(count >> 32); + qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state, + digestsize); + rctx->buflen = (unsigned int)(in_count & (blocksize - 1)); + + return 0; +} + +static int qce_ahash_import(struct ahash_request *req, const void *in) +{ + struct qce_sha_reqctx *rctx = ahash_request_ctx(req); + unsigned long flags = rctx->flags; + bool hmac = IS_SHA_HMAC(flags); + int ret = -EINVAL; + + if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) { + const struct sha1_state *state = in; + + ret = qce_import_common(req, state->count, state->state, + state->buffer, hmac); + } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) { + const struct sha256_state *state = in; + + ret = qce_import_common(req, state->count, state->state, + state->buf, hmac); + } + + return ret; +} + +static int qce_ahash_update(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct qce_sha_reqctx *rctx = ahash_request_ctx(req); + struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); + struct qce_device *qce = tmpl->qce; + struct scatterlist *sg_last, *sg; + unsigned int total, len; + unsigned int hash_later; + unsigned int nbytes; + unsigned int blocksize; + + blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); + rctx->count += req->nbytes; + + /* check for buffer from previous updates and append it */ + total = req->nbytes + rctx->buflen; + + if (total <= blocksize) { + scatterwalk_map_and_copy(rctx->buf + rctx->buflen, req->src, + 0, req->nbytes, 0); + rctx->buflen += req->nbytes; + return 0; + } + + /* save the original req structure fields */ + rctx->src_orig = req->src; + rctx->nbytes_orig = req->nbytes; + + /* + * if we have data from previous update copy them on buffer. The old + * data will be combined with current request bytes. 
+ */ + if (rctx->buflen) + memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen); + + /* calculate how many bytes will be hashed later */ + hash_later = total % blocksize; + if (hash_later) { + unsigned int src_offset = req->nbytes - hash_later; + scatterwalk_map_and_copy(rctx->buf, req->src, src_offset, + hash_later, 0); + } + + /* here nbytes is multiple of blocksize */ + nbytes = total - hash_later; + + len = rctx->buflen; + sg = sg_last = req->src; + + while (len < nbytes && sg) { + if (len + sg_dma_len(sg) > nbytes) + break; + len += sg_dma_len(sg); + sg_last = sg; + sg = scatterwalk_sg_next(sg); + } + + if (!sg_last) + return -EINVAL; + + sg_mark_end(sg_last); + + if (rctx->buflen) { + sg_init_table(rctx->sg, 2); + sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen); + scatterwalk_sg_chain(rctx->sg, 2, req->src); + req->src = rctx->sg; + } + + req->nbytes = nbytes; + rctx->buflen = hash_later; + + return qce->async_req_enqueue(tmpl->qce, &req->base); +} + +static int qce_ahash_final(struct ahash_request *req) +{ + struct qce_sha_reqctx *rctx = ahash_request_ctx(req); + struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); + struct qce_device *qce = tmpl->qce; + + if (!rctx->buflen) + return 0; + + rctx->last_blk = true; + + rctx->src_orig = req->src; + rctx->nbytes_orig = req->nbytes; + + memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen); + sg_init_one(rctx->sg, rctx->tmpbuf, rctx->buflen); + + req->src = rctx->sg; + req->nbytes = rctx->buflen; + + return qce->async_req_enqueue(tmpl->qce, &req->base); +} + +static int qce_ahash_digest(struct ahash_request *req) +{ + struct qce_sha_reqctx *rctx = ahash_request_ctx(req); + struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); + struct qce_device *qce = tmpl->qce; + int ret; + + ret = qce_ahash_init(req); + if (ret) + return ret; + + rctx->src_orig = req->src; + rctx->nbytes_orig = req->nbytes; + rctx->first_blk = true; + rctx->last_blk = true; + + return qce->async_req_enqueue(tmpl->qce, &req->base); +} + +struct qce_ahash_result { + struct completion completion; + int error; +}; + +static void qce_digest_complete(struct crypto_async_request *req, int error) +{ + struct qce_ahash_result *result = req->data; + + if (error == -EINPROGRESS) + return; + + result->error = error; + complete(&result->completion); +} + +static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + unsigned int digestsize = crypto_ahash_digestsize(tfm); + struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base); + struct qce_ahash_result result; + struct ahash_request *req; + struct scatterlist sg; + unsigned int blocksize; + struct crypto_ahash *ahash_tfm; + u8 *buf; + int ret; + const char *alg_name; + + blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); + memset(ctx->authkey, 0, sizeof(ctx->authkey)); + + if (keylen <= blocksize) { + memcpy(ctx->authkey, key, keylen); + return 0; + } + + if (digestsize == SHA1_DIGEST_SIZE) + alg_name = "sha1-qce"; + else if (digestsize == SHA256_DIGEST_SIZE) + alg_name = "sha256-qce"; + else + return -EINVAL; + + ahash_tfm = crypto_alloc_ahash(alg_name, CRYPTO_ALG_TYPE_AHASH, + CRYPTO_ALG_TYPE_AHASH_MASK); + if (IS_ERR(ahash_tfm)) + return PTR_ERR(ahash_tfm); + + req = ahash_request_alloc(ahash_tfm, GFP_KERNEL); + if (!req) { + ret = -ENOMEM; + goto err_free_ahash; + } + + init_completion(&result.completion); + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + qce_digest_complete, &result); + crypto_ahash_clear_flags(ahash_tfm, ~0); + + buf = kzalloc(keylen + 
QCE_MAX_ALIGN_SIZE, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto err_free_req; + } + + memcpy(buf, key, keylen); + sg_init_one(&sg, buf, keylen); + ahash_request_set_crypt(req, &sg, ctx->authkey, keylen); + + ret = crypto_ahash_digest(req); + if (ret == -EINPROGRESS || ret == -EBUSY) { + ret = wait_for_completion_interruptible(&result.completion); + if (!ret) + ret = result.error; + } + + if (ret) + crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + + kfree(buf); +err_free_req: + ahash_request_free(req); +err_free_ahash: + crypto_free_ahash(ahash_tfm); + return ret; +} + +static int qce_ahash_cra_init(struct crypto_tfm *tfm) +{ + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); + struct qce_sha_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_ahash_set_reqsize(ahash, sizeof(struct qce_sha_reqctx)); + memset(ctx, 0, sizeof(*ctx)); + return 0; +} + +struct qce_ahash_def { + unsigned long flags; + const char *name; + const char *drv_name; + unsigned int digestsize; + unsigned int blocksize; + unsigned int statesize; + const __be32 *std_iv; +}; + +static const struct qce_ahash_def ahash_def[] = { + { + .flags = QCE_HASH_SHA1, + .name = "sha1", + .drv_name = "sha1-qce", + .digestsize = SHA1_DIGEST_SIZE, + .blocksize = SHA1_BLOCK_SIZE, + .statesize = sizeof(struct sha1_state), + .std_iv = std_iv_sha1, + }, + { + .flags = QCE_HASH_SHA256, + .name = "sha256", + .drv_name = "sha256-qce", + .digestsize = SHA256_DIGEST_SIZE, + .blocksize = SHA256_BLOCK_SIZE, + .statesize = sizeof(struct sha256_state), + .std_iv = std_iv_sha256, + }, + { + .flags = QCE_HASH_SHA1_HMAC, + .name = "hmac(sha1)", + .drv_name = "hmac-sha1-qce", + .digestsize = SHA1_DIGEST_SIZE, + .blocksize = SHA1_BLOCK_SIZE, + .statesize = sizeof(struct sha1_state), + .std_iv = std_iv_sha1, + }, + { + .flags = QCE_HASH_SHA256_HMAC, + .name = "hmac(sha256)", + .drv_name = "hmac-sha256-qce", + .digestsize = SHA256_DIGEST_SIZE, + .blocksize = SHA256_BLOCK_SIZE, + .statesize = sizeof(struct sha256_state), + .std_iv = std_iv_sha256, + }, +}; + +static int qce_ahash_register_one(const struct qce_ahash_def *def, + struct qce_device *qce) +{ + struct qce_alg_template *tmpl; + struct ahash_alg *alg; + struct crypto_alg *base; + int ret; + + tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL); + if (!tmpl) + return -ENOMEM; + + tmpl->std_iv = def->std_iv; + + alg = &tmpl->alg.ahash; + alg->init = qce_ahash_init; + alg->update = qce_ahash_update; + alg->final = qce_ahash_final; + alg->digest = qce_ahash_digest; + alg->export = qce_ahash_export; + alg->import = qce_ahash_import; + if (IS_SHA_HMAC(def->flags)) + alg->setkey = qce_ahash_hmac_setkey; + alg->halg.digestsize = def->digestsize; + alg->halg.statesize = def->statesize; + + base = &alg->halg.base; + base->cra_blocksize = def->blocksize; + base->cra_priority = 300; + base->cra_flags = CRYPTO_ALG_ASYNC; + base->cra_ctxsize = sizeof(struct qce_sha_ctx); + base->cra_alignmask = 0; + base->cra_module = THIS_MODULE; + base->cra_init = qce_ahash_cra_init; + INIT_LIST_HEAD(&base->cra_list); + + snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); + snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->drv_name); + + INIT_LIST_HEAD(&tmpl->entry); + tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AHASH; + tmpl->alg_flags = def->flags; + tmpl->qce = qce; + + ret = crypto_register_ahash(alg); + if (ret) { + kfree(tmpl); + dev_err(qce->dev, "%s registration failed\n", base->cra_name); + return ret; + } + + list_add_tail(&tmpl->entry, &ahash_algs); + dev_dbg(qce->dev, "%s is 
registered\n", base->cra_name); + return 0; +} + +static void qce_ahash_unregister(struct qce_device *qce) +{ + struct qce_alg_template *tmpl, *n; + + list_for_each_entry_safe(tmpl, n, &ahash_algs, entry) { + crypto_unregister_ahash(&tmpl->alg.ahash); + list_del(&tmpl->entry); + kfree(tmpl); + } +} + +static int qce_ahash_register(struct qce_device *qce) +{ + int ret, i; + + for (i = 0; i < ARRAY_SIZE(ahash_def); i++) { + ret = qce_ahash_register_one(&ahash_def[i], qce); + if (ret) + goto err; + } + + return 0; +err: + qce_ahash_unregister(qce); + return ret; +} + +const struct qce_algo_ops ahash_ops = { + .type = CRYPTO_ALG_TYPE_AHASH, + .register_algs = qce_ahash_register, + .unregister_algs = qce_ahash_unregister, + .async_req_handle = qce_ahash_async_req_handle, +}; diff --git a/drivers/crypto/qce/sha.h b/drivers/crypto/qce/sha.h new file mode 100644 index 000000000000..286f0d5397f3 --- /dev/null +++ b/drivers/crypto/qce/sha.h @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _SHA_H_ +#define _SHA_H_ + +#include +#include + +#include "common.h" +#include "core.h" + +#define QCE_SHA_MAX_BLOCKSIZE SHA256_BLOCK_SIZE +#define QCE_SHA_MAX_DIGESTSIZE SHA256_DIGEST_SIZE + +struct qce_sha_ctx { + u8 authkey[QCE_SHA_MAX_BLOCKSIZE]; +}; + +/** + * struct qce_sha_reqctx - holds private ahash objects per request + * @buf: used during update, import and export + * @tmpbuf: buffer for internal use + * @digest: calculated digest buffer + * @buflen: length of the buffer + * @flags: operation flags + * @src_orig: original request sg list + * @nbytes_orig: original request number of bytes + * @src_chained: is source scatterlist chained + * @src_nents: source number of entries + * @byte_count: byte count + * @count: save count in states during update, import and export + * @first_blk: is it the first block + * @last_blk: is it the last block + * @sg: used to chain sg lists + * @authkey: pointer to auth key in sha ctx + * @authklen: auth key length + * @result_sg: scatterlist used for result buffer + */ +struct qce_sha_reqctx { + u8 buf[QCE_SHA_MAX_BLOCKSIZE]; + u8 tmpbuf[QCE_SHA_MAX_BLOCKSIZE]; + u8 digest[QCE_SHA_MAX_DIGESTSIZE]; + unsigned int buflen; + unsigned long flags; + struct scatterlist *src_orig; + unsigned int nbytes_orig; + bool src_chained; + int src_nents; + __be32 byte_count[2]; + u64 count; + bool first_blk; + bool last_blk; + struct scatterlist sg[2]; + u8 *authkey; + unsigned int authklen; + struct scatterlist result_sg; +}; + +static inline struct qce_alg_template *to_ahash_tmpl(struct crypto_tfm *tfm) +{ + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); + struct ahash_alg *alg = container_of(crypto_hash_alg_common(ahash), + struct ahash_alg, halg); + + return container_of(alg, struct qce_alg_template, alg.ahash); +} + +extern const struct qce_algo_ops ahash_ops; + +#endif /* _SHA_H_ */ -- cgit v1.2.3 From c672752d9c8450cbe051cf6bf93bf35b9645b226 Mon Sep 17 00:00:00 2001 From: Stanimir Varbanov Date: Wed, 25 Jun 2014 19:28:58 +0300 Subject: crypto: qce - Build 
Qualcomm crypto driver Modify crypto Kconfig and Makefile in order to build the qce driver and adds qce Makefile as well. Signed-off-by: Stanimir Varbanov Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 16 ++++++++++++++++ drivers/crypto/Makefile | 1 + drivers/crypto/qce/Makefile | 6 ++++++ 3 files changed, 23 insertions(+) create mode 100644 drivers/crypto/qce/Makefile (limited to 'drivers/crypto') diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 107822f43fc3..3e5186e5c50c 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -419,4 +419,20 @@ config CRYPTO_DEV_MXS_DCP will be called mxs-dcp. source "drivers/crypto/qat/Kconfig" + +config CRYPTO_DEV_QCE + tristate "Qualcomm crypto engine accelerator" + select CRYPTO_AES + select CRYPTO_DES + select CRYPTO_ECB + select CRYPTO_CBC + select CRYPTO_XTS + select CRYPTO_CTR + select CRYPTO_ALGAPI + select CRYPTO_BLKCIPHER + help + This driver supports Qualcomm crypto engine accelerator + hardware. To compile this driver as a module, choose M here. The + module will be called qcrypto. + endif # CRYPTO_HW diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index f996ed72569c..3924f93d5774 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -24,3 +24,4 @@ obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/ obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/ +obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/ diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile new file mode 100644 index 000000000000..348dc3173afa --- /dev/null +++ b/drivers/crypto/qce/Makefile @@ -0,0 +1,6 @@ +obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypto.o +qcrypto-objs := core.o \ + common.o \ + dma.o \ + sha.o \ + ablkcipher.o -- cgit v1.2.3 From e1f8859ee265fc89bd21b4dca79e8e983a044892 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 26 Jun 2014 13:43:02 +0200 Subject: crypto: ux500 - make interrupt mode plausible The interrupt handler in the ux500 crypto driver has an obviously incorrect way to access the data buffer, which for a while has caused this build warning: ../ux500/cryp/cryp_core.c: In function 'cryp_interrupt_handler': ../ux500/cryp/cryp_core.c:234:5: warning: passing argument 1 of '__fswab32' makes integer from pointer without a cast [enabled by default] writel_relaxed(ctx->indata, ^ In file included from ../include/linux/swab.h:4:0, from ../include/uapi/linux/byteorder/big_endian.h:12, from ../include/linux/byteorder/big_endian.h:4, from ../arch/arm/include/uapi/asm/byteorder.h:19, from ../include/asm-generic/bitops/le.h:5, from ../arch/arm/include/asm/bitops.h:340, from ../include/linux/bitops.h:33, from ../include/linux/kernel.h:10, from ../include/linux/clk.h:16, from ../drivers/crypto/ux500/cryp/cryp_core.c:12: ../include/uapi/linux/swab.h:57:119: note: expected '__u32' but argument is of type 'const u8 *' static inline __attribute_const__ __u32 __fswab32(__u32 val) There are at least two, possibly three problems here: a) when writing into the FIFO, we copy the pointer rather than the actual data we want to give to the hardware b) the data pointer is an array of 8-bit values, while the FIFO is 32-bit wide, so both the read and write access fail to do a proper type conversion c) This seems incorrect for big-endian kernels, on which we need to byte-swap any register access, but not normally FIFO accesses, at least the DMA case doesn't do it either. 
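Condensed, the broken write path versus the intended one looks roughly like this (a sketch only, reusing the driver's existing ctx and device_data structures, where ctx->indata is a const u8 *):

	/* broken: feeds the pointer value of ctx->indata into the FIFO;
	 * the const u8 * -> u32 conversion is what the build warning flags */
	writel_relaxed(ctx->indata, &device_data->base->din);

	/* intended: copy blocksize/4 32-bit words of actual data to the FIFO */
	writesl(&device_data->base->din, ctx->indata, ctx->blocksize / 4);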
This converts the bogus loop to use the same readsl/writesl pair that we use for the two other modes (DMA and polling). This is more efficient and consistent, and probably correct for endianness. The bug has existed since the driver was first merged, and was probably never detected because nobody tried to use interrupt mode. It might make sense to backport this fix to stable kernels, depending on how the crypto maintainers feel about that. Signed-off-by: Arnd Bergmann Cc: linux-crypto@vger.kernel.org Cc: Fabio Baltieri Cc: Linus Walleij Cc: Herbert Xu Cc: "David S. Miller" Cc: stable@vger.kernel.org Signed-off-by: Herbert Xu --- drivers/crypto/ux500/cryp/cryp_core.c | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index a999f537228f..92105f3dc8e0 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -190,7 +190,7 @@ static void add_session_id(struct cryp_ctx *ctx) static irqreturn_t cryp_interrupt_handler(int irq, void *param) { struct cryp_ctx *ctx; - int i; + int count; struct cryp_device_data *device_data; if (param == NULL) { @@ -215,12 +215,11 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param) if (cryp_pending_irq_src(device_data, CRYP_IRQ_SRC_OUTPUT_FIFO)) { if (ctx->outlen / ctx->blocksize > 0) { - for (i = 0; i < ctx->blocksize / 4; i++) { - *(ctx->outdata) = readl_relaxed( - &device_data->base->dout); - ctx->outdata += 4; - ctx->outlen -= 4; - } + count = ctx->blocksize / 4; + + readsl(&device_data->base->dout, ctx->outdata, count); + ctx->outdata += count; + ctx->outlen -= count; if (ctx->outlen == 0) { cryp_disable_irq_src(device_data, @@ -230,12 +229,12 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param) } else if (cryp_pending_irq_src(device_data, CRYP_IRQ_SRC_INPUT_FIFO)) { if (ctx->datalen / ctx->blocksize > 0) { - for (i = 0 ; i < ctx->blocksize / 4; i++) { - writel_relaxed(ctx->indata, - &device_data->base->din); - ctx->indata += 4; - ctx->datalen -= 4; - } + count = ctx->blocksize / 4; + + writesl(&device_data->base->din, ctx->indata, count); + + ctx->indata += count; + ctx->datalen -= count; if (ctx->datalen == 0) cryp_disable_irq_src(device_data, -- cgit v1.2.3 From 178f827a60becdcff67df61cc9f0200fcce93f6c Mon Sep 17 00:00:00 2001 From: Nitesh Narayan Lal Date: Tue, 1 Jul 2014 19:54:54 +0530 Subject: crypto: caam - Enabling multiple caam debug support for C29x platform In the current setup, the debug file system enables us to debug the operational details of only one CAAM. This patch adds support for debugging multiple CAAMs.
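The change itself is a one-liner: key the top-level debugfs directory off the controller's device name instead of a fixed string. A sketch of the effect (the example device names are hypothetical):

	/* before: every CAAM instance races to create the same "caam" directory */
	ctrlpriv->dfs_root = debugfs_create_dir("caam", NULL);

	/* after: each instance gets its own directory, e.g.
	 * /sys/kernel/debug/ffe300000.crypto and /sys/kernel/debug/ffe310000.crypto */
	ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);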
Signed-off-by: Nitesh Narayan Lal Signed-off-by: Vakul Garg Signed-off-by: Herbert Xu --- drivers/crypto/caam/ctrl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 1d8e003d2582..c6e9d3b2d502 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -588,7 +588,7 @@ static int caam_probe(struct platform_device *pdev) */ perfmon = (struct caam_perfmon __force *)&ctrl->perfmon; - ctrlpriv->dfs_root = debugfs_create_dir("caam", NULL); + ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL); ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root); /* Controller-level - performance monitor counters */ -- cgit v1.2.3 From 58a6535f1a830f55076b82063dc83dc2a9fadf23 Mon Sep 17 00:00:00 2001 From: Stanimir Varbanov Date: Fri, 4 Jul 2014 17:03:29 +0300 Subject: crypto: qce - fix sparse warnings Fix few sparse warnings of type: - sparse: incorrect type in argument - sparse: incorrect type in initializer Signed-off-by: Stanimir Varbanov Signed-off-by: Herbert Xu --- drivers/crypto/qce/common.c | 15 +++++++++------ drivers/crypto/qce/common.h | 2 +- drivers/crypto/qce/sha.c | 20 ++++++++++---------- 3 files changed, 20 insertions(+), 17 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c index 1cd4d5ea8114..1fb5fde7fc03 100644 --- a/drivers/crypto/qce/common.c +++ b/drivers/crypto/qce/common.c @@ -201,7 +201,8 @@ static void qce_xtskey(struct qce_device *qce, const u8 *enckey, unsigned int xtsklen = enckeylen / (2 * sizeof(u32)); unsigned int xtsdusize; - qce_cpu_to_be32p_array(xtskey, enckey + enckeylen / 2, enckeylen / 2); + qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2, + enckeylen / 2); qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen); /* xts du size 512B */ @@ -262,7 +263,8 @@ static int qce_setup_regs_ahash(struct crypto_async_request *async_req, u32 authkey_words = rctx->authklen / sizeof(u32); qce_cpu_to_be32p_array(mackey, rctx->authkey, rctx->authklen); - qce_write_array(qce, REG_AUTH_KEY0, mackey, authkey_words); + qce_write_array(qce, REG_AUTH_KEY0, (u32 *)mackey, + authkey_words); } if (IS_CMAC(rctx->flags)) @@ -274,12 +276,13 @@ static int qce_setup_regs_ahash(struct crypto_async_request *async_req, qce_cpu_to_be32p_array(auth, rctx->digest, digestsize); iv_words = (IS_SHA1(rctx->flags) || IS_SHA1_HMAC(rctx->flags)) ? 
5 : 8; - qce_write_array(qce, REG_AUTH_IV0, auth, iv_words); + qce_write_array(qce, REG_AUTH_IV0, (u32 *)auth, iv_words); if (rctx->first_blk) qce_clear_array(qce, REG_AUTH_BYTECNT0, 4); else - qce_write_array(qce, REG_AUTH_BYTECNT0, rctx->byte_count, 2); + qce_write_array(qce, REG_AUTH_BYTECNT0, + (u32 *)rctx->byte_count, 2); auth_cfg = qce_auth_cfg(rctx->flags, 0); @@ -354,7 +357,7 @@ static int qce_setup_regs_ablkcipher(struct crypto_async_request *async_req, return -EINVAL; } - qce_write_array(qce, REG_ENCR_KEY0, enckey, enckey_words); + qce_write_array(qce, REG_ENCR_KEY0, (u32 *)enckey, enckey_words); if (!IS_ECB(flags)) { if (IS_XTS(flags)) @@ -362,7 +365,7 @@ static int qce_setup_regs_ablkcipher(struct crypto_async_request *async_req, else qce_cpu_to_be32p_array(enciv, rctx->iv, ivsize); - qce_write_array(qce, REG_CNTR0_IV0, enciv, enciv_words); + qce_write_array(qce, REG_CNTR0_IV0, (u32 *)enciv, enciv_words); } if (IS_ENCRYPT(flags)) diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h index 411b1fc97216..a4addd4f7d6c 100644 --- a/drivers/crypto/qce/common.h +++ b/drivers/crypto/qce/common.h @@ -85,7 +85,7 @@ struct qce_alg_template { struct list_head entry; u32 crypto_alg_type; unsigned long alg_flags; - const __be32 *std_iv; + const u32 *std_iv; union { struct crypto_alg crypto; struct ahash_alg ahash; diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index 3c33ac9c8cba..f3385934eed2 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c @@ -25,11 +25,11 @@ static LIST_HEAD(ahash_algs); -static const __be32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(__be32)] = { +static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0 }; -static const __be32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(__be32)] = { +static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 }; @@ -132,7 +132,7 @@ static int qce_ahash_init(struct ahash_request *req) { struct qce_sha_reqctx *rctx = ahash_request_ctx(req); struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); - const __be32 *std_iv = tmpl->std_iv; + const u32 *std_iv = tmpl->std_iv; memset(rctx, 0, sizeof(*rctx)); rctx->first_blk = true; @@ -156,15 +156,15 @@ static int qce_ahash_export(struct ahash_request *req, void *out) struct sha1_state *out_state = out; out_state->count = rctx->count; - qce_cpu_to_be32p_array(out_state->state, rctx->digest, - digestsize); + qce_cpu_to_be32p_array((__be32 *)out_state->state, + rctx->digest, digestsize); memcpy(out_state->buffer, rctx->buf, blocksize); } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) { struct sha256_state *out_state = out; out_state->count = rctx->count; - qce_cpu_to_be32p_array(out_state->state, rctx->digest, - digestsize); + qce_cpu_to_be32p_array((__be32 *)out_state->state, + rctx->digest, digestsize); memcpy(out_state->buf, rctx->buf, blocksize); } else { return -EINVAL; @@ -199,8 +199,8 @@ static int qce_import_common(struct ahash_request *req, u64 in_count, count += SHA_PADDING; } - rctx->byte_count[0] = (__be32)(count & ~SHA_PADDING_MASK); - rctx->byte_count[1] = (__be32)(count >> 32); + rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK); + rctx->byte_count[1] = (__force __be32)(count >> 32); qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state, digestsize); rctx->buflen = (unsigned int)(in_count & (blocksize - 1)); @@ -454,7 +454,7 @@ struct 
qce_ahash_def { unsigned int digestsize; unsigned int blocksize; unsigned int statesize; - const __be32 *std_iv; + const u32 *std_iv; }; static const struct qce_ahash_def ahash_def[] = { -- cgit v1.2.3 From 44b933b9e1963a487bd97d2d94932e4cee6cbe9d Mon Sep 17 00:00:00 2001 From: Stanimir Varbanov Date: Fri, 4 Jul 2014 17:03:30 +0300 Subject: crypto: qce - add dependancy to Kconfig Make qce crypto driver depend on ARCH_QCOM and make possible to test driver compilation. Signed-off-by: Stanimir Varbanov Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/crypto') diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 3e5186e5c50c..5ef9ec9d55b3 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -422,6 +422,7 @@ source "drivers/crypto/qat/Kconfig" config CRYPTO_DEV_QCE tristate "Qualcomm crypto engine accelerator" + depends on ARCH_QCOM || COMPILE_TEST select CRYPTO_AES select CRYPTO_DES select CRYPTO_ECB -- cgit v1.2.3 From 35af64038623865015d1786360cdbbcc2e72d78b Mon Sep 17 00:00:00 2001 From: Ruchika Gupta Date: Mon, 7 Jul 2014 10:42:12 +0530 Subject: crypto: caam - Check for CAAM block presence before registering with crypto layer The layer which registers with the crypto API should check for the presence of the CAAM device it is going to use. If the platform's device tree doesn't have the required CAAM node, the layer should return an error and not register the algorithms with crypto API layer. Signed-off-by: Ruchika Gupta Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 29 +++++++++++++++++++++++++++++ drivers/crypto/caam/caamhash.c | 28 ++++++++++++++++++++++++++++ drivers/crypto/caam/caamrng.c | 28 ++++++++++++++++++++++++++++ 3 files changed, 85 insertions(+) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 87d9de48a39e..64c606d9e821 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -2441,8 +2441,37 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template static int __init caam_algapi_init(void) { + struct device_node *dev_node; + struct platform_device *pdev; + struct device *ctrldev; + void *priv; int i = 0, err = 0; + dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); + if (!dev_node) { + dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); + if (!dev_node) + return -ENODEV; + } + + pdev = of_find_device_by_node(dev_node); + if (!pdev) { + of_node_put(dev_node); + return -ENODEV; + } + + ctrldev = &pdev->dev; + priv = dev_get_drvdata(ctrldev); + of_node_put(dev_node); + + /* + * If priv is NULL, it's probably because the caam driver wasn't + * properly initialized (e.g. RNG4 init failed). Thus, bail out here. 
+ */ + if (!priv) + return -ENODEV; + + INIT_LIST_HEAD(&alg_list); /* register crypto algorithms the device supports */ diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 2ab057b0a6d2..7754df4c3df1 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -1793,8 +1793,36 @@ caam_hash_alloc(struct caam_hash_template *template, static int __init caam_algapi_hash_init(void) { + struct device_node *dev_node; + struct platform_device *pdev; + struct device *ctrldev; + void *priv; int i = 0, err = 0; + dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); + if (!dev_node) { + dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); + if (!dev_node) + return -ENODEV; + } + + pdev = of_find_device_by_node(dev_node); + if (!pdev) { + of_node_put(dev_node); + return -ENODEV; + } + + ctrldev = &pdev->dev; + priv = dev_get_drvdata(ctrldev); + of_node_put(dev_node); + + /* + * If priv is NULL, it's probably because the caam driver wasn't + * properly initialized (e.g. RNG4 init failed). Thus, bail out here. + */ + if (!priv) + return -ENODEV; + INIT_LIST_HEAD(&hash_list); /* register crypto algorithms the device supports */ diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c index 8c07d3153f12..a4afa8a8ef02 100644 --- a/drivers/crypto/caam/caamrng.c +++ b/drivers/crypto/caam/caamrng.c @@ -278,6 +278,34 @@ static void __exit caam_rng_exit(void) static int __init caam_rng_init(void) { struct device *dev; + struct device_node *dev_node; + struct platform_device *pdev; + struct device *ctrldev; + void *priv; + + dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); + if (!dev_node) { + dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); + if (!dev_node) + return -ENODEV; + } + + pdev = of_find_device_by_node(dev_node); + if (!pdev) { + of_node_put(dev_node); + return -ENODEV; + } + + ctrldev = &pdev->dev; + priv = dev_get_drvdata(ctrldev); + of_node_put(dev_node); + + /* + * If priv is NULL, it's probably because the caam driver wasn't + * properly initialized (e.g. RNG4 init failed). Thus, bail out here. + */ + if (!priv) + return -ENODEV; dev = caam_jr_alloc(); if (IS_ERR(dev)) { -- cgit v1.2.3 From ac1a2b49ea3dff611d820043a0c597db3867bee0 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 8 Jul 2014 13:59:07 +0300 Subject: crypto: qat - remove an unneeded cast The cast to (unsigned int *) doesn't hurt anything but it is pointless. Signed-off-by: Dan Carpenter Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/qat_hal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c index 903ca88109f7..28da876ee268 100644 --- a/drivers/crypto/qat/qat_common/qat_hal.c +++ b/drivers/crypto/qat/qat_common/qat_hal.c @@ -850,7 +850,7 @@ static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle, if ((inst_num > handle->hal_handle->max_ustore) || !micro_inst) { pr_err("QAT: invalid instructs inst_num=%d, micro_inst=0x%p\n ", - inst_num, (unsigned int *)micro_inst); + inst_num, micro_inst); return -EINVAL; } /* save current context */ -- cgit v1.2.3 From 126ae9adc1ec8d9006542f1a5e474b0183845e21 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Thu, 10 Jul 2014 10:58:35 -0500 Subject: crypto: ccp - Base AXI DMA cache settings on device tree The default cache operations for ARM64 were changed during 3.15. 
To use coherent operations a "dma-coherent" device tree property is required. If that property is not present in the device tree node then the non-coherent operations are assigned for the device. Add support to the ccp driver to assign the AXI DMA cache settings based on whether the "dma-coherent" property is present in the device node. If present, use settings that work with the caches. If not present, use settings that do not look at the caches. Signed-off-by: Tom Lendacky Signed-off-by: Herbert Xu --- Documentation/devicetree/bindings/crypto/amd-ccp.txt | 3 +++ drivers/crypto/ccp/Kconfig | 1 + drivers/crypto/ccp/ccp-dev.c | 2 +- drivers/crypto/ccp/ccp-dev.h | 4 ++++ drivers/crypto/ccp/ccp-platform.c | 6 ++++++ 5 files changed, 15 insertions(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/Documentation/devicetree/bindings/crypto/amd-ccp.txt b/Documentation/devicetree/bindings/crypto/amd-ccp.txt index 6e0b11aa8995..8c61183b41e0 100644 --- a/Documentation/devicetree/bindings/crypto/amd-ccp.txt +++ b/Documentation/devicetree/bindings/crypto/amd-ccp.txt @@ -7,6 +7,9 @@ Required properties: that services interrupts for this device - interrupts: Should contain the CCP interrupt +Optional properties: +- dma-coherent: Present if dma operations are coherent + Example: ccp@e0100000 { compatible = "amd,ccp-seattle-v1a"; diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 7639ffc36c68..474382d50ec4 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -3,6 +3,7 @@ config CRYPTO_DEV_CCP_DD depends on CRYPTO_DEV_CCP default m select HW_RANDOM + select OF if ARM64 help Provides the interface to use the AMD Cryptographic Coprocessor which can be used to accelerate or offload encryption operations diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index fa1ab10f960f..a7d110652a74 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -364,7 +364,7 @@ int ccp_init(struct ccp_device *ccp) #ifdef CONFIG_ARM64 /* For arm64 set the recommended queue cache settings */ - iowrite32(CACHE_WB_NO_ALLOC, ccp->io_regs + CMD_Q_CACHE_BASE + + iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE + (CMD_Q_CACHE_INC * i)); #endif diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 1c5651b09506..62ff35a6b9ec 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -30,6 +30,7 @@ #define TRNG_RETRIES 10 +#define CACHE_NONE 0x00 #define CACHE_WB_NO_ALLOC 0xb7 @@ -255,6 +256,9 @@ struct ccp_device { /* Suspend support */ unsigned int suspending; wait_queue_head_t suspend_queue; + + /* DMA caching attribute support */ + unsigned int axcache; }; diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c index 65e58291c668..b0a2806908f1 100644 --- a/drivers/crypto/ccp/ccp-platform.c +++ b/drivers/crypto/ccp/ccp-platform.c @@ -22,6 +22,7 @@ #include #include #include +#include #include "ccp-dev.h" @@ -112,6 +113,11 @@ static int ccp_platform_probe(struct platform_device *pdev) *(dev->dma_mask) = DMA_BIT_MASK(48); dev->coherent_dma_mask = DMA_BIT_MASK(48); + if (of_property_read_bool(dev->of_node, "dma-coherent")) + ccp->axcache = CACHE_WB_NO_ALLOC; + else + ccp->axcache = CACHE_NONE; + dev_set_drvdata(dev, ccp); ret = ccp_init(ccp); -- cgit v1.2.3 From a2ac287e9eaec1f105631fa847b1e9208d117a00 Mon Sep 17 00:00:00 2001 From: Horia Geanta Date: Fri, 11 Jul 2014 15:34:47 +0300 Subject: crypto: caam - set coherent_dma_mask Replace dma_set_mask with 
dma_set_mask_and_coherent, since both streaming and coherent DMA mappings are being used. Signed-off-by: Horia Geanta Acked-by: Kim Phillips Signed-off-by: Herbert Xu --- drivers/crypto/caam/ctrl.c | 6 +++--- drivers/crypto/caam/jr.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index c6e9d3b2d502..cedb56500b61 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -453,11 +453,11 @@ static int caam_probe(struct platform_device *pdev) if (sizeof(dma_addr_t) == sizeof(u64)) if (of_device_is_compatible(nprop, "fsl,sec-v5.0")) - dma_set_mask(dev, DMA_BIT_MASK(40)); + dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)); else - dma_set_mask(dev, DMA_BIT_MASK(36)); + dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36)); else - dma_set_mask(dev, DMA_BIT_MASK(32)); + dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); /* * Detect and enable JobRs diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 1d80bd3636c5..50cd1b9af2ba 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -476,11 +476,11 @@ static int caam_jr_probe(struct platform_device *pdev) if (sizeof(dma_addr_t) == sizeof(u64)) if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring")) - dma_set_mask(jrdev, DMA_BIT_MASK(40)); + dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40)); else - dma_set_mask(jrdev, DMA_BIT_MASK(36)); + dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36)); else - dma_set_mask(jrdev, DMA_BIT_MASK(32)); + dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32)); /* Identify the interrupt */ jrpriv->irq = irq_of_parse_and_map(nprop, 0); -- cgit v1.2.3 From 71c65f7c90a176877ad1aa87b752217db61148a8 Mon Sep 17 00:00:00 2001 From: Horia Geanta Date: Fri, 11 Jul 2014 15:34:48 +0300 Subject: crypto: caam - fix typo in dma_mapping_error dma_mapping_error checks for an incorrect DMA address: s/ctx->sh_desc_enc_dma/ctx->sh_desc_dec_dma Signed-off-by: Horia Geanta Acked-by: Kim Phillips Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 64c606d9e821..31f2e10b5102 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -786,7 +786,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { + if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { dev_err(jrdev, "unable to map shared descriptor\n"); return -ENOMEM; } -- cgit v1.2.3 From ce572085282128d57324aabf415673dfbfa32d54 Mon Sep 17 00:00:00 2001 From: Horia Geanta Date: Fri, 11 Jul 2014 15:34:49 +0300 Subject: crypto: caam - fix "failed to check map error" DMA warnings Use dma_mapping_error for every dma_map_single / dma_map_page. 
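A minimal sketch of the pattern this patch enforces, using hypothetical dev/buf/len names rather than the driver's actual variables: every mapping call is immediately followed by a dma_mapping_error() check before the handle is used.

	/* sketch with hypothetical dev/buf/len; not the driver's code */
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buf_dma)) {
		dev_err(dev, "unable to map buffer\n");
		return -ENOMEM;
	}
	/* buf_dma is safe to use (e.g. in a job descriptor) only past this check */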
Signed-off-by: Horia Geanta Acked-by: Kim Phillips Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 34 +++++++++++-- drivers/crypto/caam/caamhash.c | 106 ++++++++++++++++++++++++++++++++++------- drivers/crypto/caam/caamrng.c | 51 ++++++++++++++++---- 3 files changed, 163 insertions(+), 28 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 31f2e10b5102..694df4685920 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -1313,8 +1313,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, DMA_FROM_DEVICE, dst_chained); } - /* Check if data are contiguous */ iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, iv_dma)) { + dev_err(jrdev, "unable to map IV\n"); + return ERR_PTR(-ENOMEM); + } + + /* Check if data are contiguous */ if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen != iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src)) { @@ -1369,6 +1374,10 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, } edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + return ERR_PTR(-ENOMEM); + } return edesc; } @@ -1494,8 +1503,13 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request DMA_FROM_DEVICE, dst_chained); } - /* Check if data are contiguous */ iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, iv_dma)) { + dev_err(jrdev, "unable to map IV\n"); + return ERR_PTR(-ENOMEM); + } + + /* Check if data are contiguous */ if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen != iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src)) contig &= ~GIV_SRC_CONTIG; @@ -1559,6 +1573,10 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request } edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + return ERR_PTR(-ENOMEM); + } return edesc; } @@ -1650,11 +1668,16 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request DMA_FROM_DEVICE, dst_chained); } + iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, iv_dma)) { + dev_err(jrdev, "unable to map IV\n"); + return ERR_PTR(-ENOMEM); + } + /* * Check if iv can be contiguous with source and destination. * If so, include it. If not, create scatterlist. 
*/ - iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE); if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src)) iv_contig = true; else @@ -1693,6 +1716,11 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + return ERR_PTR(-ENOMEM); + } + edesc->iv_dma = iv_dma; #ifdef DEBUG diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 7754df4c3df1..39f7a933c8fa 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -137,13 +137,20 @@ struct caam_hash_state { /* Common job descriptor seq in/out ptr routines */ /* Map state->caam_ctx, and append seq_out_ptr command that points to it */ -static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev, - struct caam_hash_state *state, - int ctx_len) +static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev, + struct caam_hash_state *state, + int ctx_len) { state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, DMA_FROM_DEVICE); + if (dma_mapping_error(jrdev, state->ctx_dma)) { + dev_err(jrdev, "unable to map ctx\n"); + return -ENOMEM; + } + append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0); + + return 0; } /* Map req->result, and append seq_out_ptr command that points to it */ @@ -201,14 +208,19 @@ try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg, } /* Map state->caam_ctx, and add it to link table */ -static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev, - struct caam_hash_state *state, - int ctx_len, - struct sec4_sg_entry *sec4_sg, - u32 flag) +static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev, + struct caam_hash_state *state, int ctx_len, + struct sec4_sg_entry *sec4_sg, u32 flag) { state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag); + if (dma_mapping_error(jrdev, state->ctx_dma)) { + dev_err(jrdev, "unable to map ctx\n"); + return -ENOMEM; + } + dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0); + + return 0; } /* Common shared descriptor commands */ @@ -809,8 +821,10 @@ static int ahash_update_ctx(struct ahash_request *req) edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN; - ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, - edesc->sec4_sg, DMA_BIDIRECTIONAL); + ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, + edesc->sec4_sg, DMA_BIDIRECTIONAL); + if (ret) + return ret; state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, @@ -839,6 +853,10 @@ static int ahash_update_ctx(struct ahash_request *req) edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + return -ENOMEM; + } append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + to_hash, LDST_SGF); @@ -914,8 +932,10 @@ static int ahash_final_ctx(struct ahash_request *req) DESC_JOB_IO_LEN; edesc->src_nents = 0; - ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg, - DMA_TO_DEVICE); + ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, + edesc->sec4_sg, DMA_TO_DEVICE); + if (ret) + return ret; state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, buf, state->buf_dma, buflen, @@ -924,12 +944,20 @@ static int ahash_final_ctx(struct ahash_request *req) 
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + return -ENOMEM; + } append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, LDST_SGF); edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, digestsize); + if (dma_mapping_error(jrdev, edesc->dst_dma)) { + dev_err(jrdev, "unable to map dst\n"); + return -ENOMEM; + } #ifdef DEBUG print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", @@ -992,8 +1020,10 @@ static int ahash_finup_ctx(struct ahash_request *req) edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN; - ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg, - DMA_TO_DEVICE); + ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, + edesc->sec4_sg, DMA_TO_DEVICE); + if (ret) + return ret; state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, buf, state->buf_dma, buflen, @@ -1004,12 +1034,20 @@ static int ahash_finup_ctx(struct ahash_request *req) edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + return -ENOMEM; + } append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen + req->nbytes, LDST_SGF); edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, digestsize); + if (dma_mapping_error(jrdev, edesc->dst_dma)) { + dev_err(jrdev, "unable to map dst\n"); + return -ENOMEM; + } #ifdef DEBUG print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", @@ -1070,6 +1108,10 @@ static int ahash_digest(struct ahash_request *req) sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0); edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + return -ENOMEM; + } src_dma = edesc->sec4_sg_dma; options = LDST_SGF; } else { @@ -1080,6 +1122,10 @@ static int ahash_digest(struct ahash_request *req) edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, digestsize); + if (dma_mapping_error(jrdev, edesc->dst_dma)) { + dev_err(jrdev, "unable to map dst\n"); + return -ENOMEM; + } #ifdef DEBUG print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", @@ -1128,11 +1174,19 @@ static int ahash_final_no_ctx(struct ahash_request *req) init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, state->buf_dma)) { + dev_err(jrdev, "unable to map src\n"); + return -ENOMEM; + } append_seq_in_ptr(desc, state->buf_dma, buflen, 0); edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, digestsize); + if (dma_mapping_error(jrdev, edesc->dst_dma)) { + dev_err(jrdev, "unable to map dst\n"); + return -ENOMEM; + } edesc->src_nents = 0; #ifdef DEBUG @@ -1219,10 +1273,16 @@ static int ahash_update_no_ctx(struct ahash_request *req) edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + return -ENOMEM; + } append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); - map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); + ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); + if (ret) + return ret; #ifdef DEBUG 
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", @@ -1311,12 +1371,20 @@ static int ahash_finup_no_ctx(struct ahash_request *req) edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + return -ENOMEM; + } append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen + req->nbytes, LDST_SGF); edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, digestsize); + if (dma_mapping_error(jrdev, edesc->dst_dma)) { + dev_err(jrdev, "unable to map dst\n"); + return -ENOMEM; + } #ifdef DEBUG print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", @@ -1393,6 +1461,10 @@ static int ahash_update_first(struct ahash_request *req) edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + return -ENOMEM; + } src_dma = edesc->sec4_sg_dma; options = LDST_SGF; } else { @@ -1410,7 +1482,9 @@ static int ahash_update_first(struct ahash_request *req) append_seq_in_ptr(desc, src_dma, to_hash, options); - map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); + ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); + if (ret) + return ret; #ifdef DEBUG print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c index a4afa8a8ef02..ae31e555793c 100644 --- a/drivers/crypto/caam/caamrng.c +++ b/drivers/crypto/caam/caamrng.c @@ -185,7 +185,7 @@ static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait) max - copied_idx, false); } -static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx) +static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx) { struct device *jrdev = ctx->jrdev; u32 *desc = ctx->sh_desc; @@ -203,13 +203,18 @@ static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx) ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } #ifdef DEBUG print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); #endif + return 0; } -static inline void rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id) +static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id) { struct device *jrdev = ctx->jrdev; struct buf_data *bd = &ctx->bufs[buf_id]; @@ -220,12 +225,17 @@ static inline void rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id) HDR_REVERSE); bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(jrdev, bd->addr)) { + dev_err(jrdev, "unable to map dst\n"); + return -ENOMEM; + } append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0); #ifdef DEBUG print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); #endif + return 0; } static void caam_cleanup(struct hwrng *rng) @@ -242,24 +252,44 @@ static void caam_cleanup(struct hwrng *rng) rng_unmap_ctx(rng_ctx); } -static void caam_init_buf(struct caam_rng_ctx *ctx, int buf_id) +static int caam_init_buf(struct caam_rng_ctx *ctx, int buf_id) { struct buf_data *bd = &ctx->bufs[buf_id]; + int err; + + err = rng_create_job_desc(ctx, buf_id); + if (err) + return err; - rng_create_job_desc(ctx, buf_id); atomic_set(&bd->empty, BUF_EMPTY); submit_job(ctx, buf_id == ctx->current_buf); wait_for_completion(&bd->filled); + + 
return 0; } -static void caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev) +static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev) { + int err; + ctx->jrdev = jrdev; - rng_create_sh_desc(ctx); + + err = rng_create_sh_desc(ctx); + if (err) + return err; + ctx->current_buf = 0; ctx->cur_buf_idx = 0; - caam_init_buf(ctx, 0); - caam_init_buf(ctx, 1); + + err = caam_init_buf(ctx, 0); + if (err) + return err; + + err = caam_init_buf(ctx, 1); + if (err) + return err; + + return 0; } static struct hwrng caam_rng = { @@ -282,6 +312,7 @@ static int __init caam_rng_init(void) struct platform_device *pdev; struct device *ctrldev; void *priv; + int err; dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); if (!dev_node) { @@ -315,7 +346,9 @@ static int __init caam_rng_init(void) rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_DMA); if (!rng_ctx) return -ENOMEM; - caam_init_rng(rng_ctx, dev); + err = caam_init_rng(rng_ctx, dev); + if (err) + return err; dev_info(dev, "registering rng-caam\n"); return hwrng_register(&caam_rng); -- cgit v1.2.3 From e11aa9f1351f150ee9f0166bffc5e007c81c1364 Mon Sep 17 00:00:00 2001 From: Horia Geanta Date: Fri, 11 Jul 2014 15:34:50 +0300 Subject: crypto: caam - fix DMA unmapping error in hash_digest_key Key being hashed is unmapped using the digest size instead of initial length: caam_jr ffe301000.jr: DMA-API: device driver frees DMA memory with different size [device address=0x000000002eeedac0] [map size=80 bytes] [unmap size=20 bytes] ------------[ cut here ]------------ WARNING: at lib/dma-debug.c:1090 Modules linked in: caamhash(+) CPU: 0 PID: 1327 Comm: cryptomgr_test Not tainted 3.16.0-rc1 #23 task: eebda5d0 ti: ee26a000 task.ti: ee26a000 NIP: c0288790 LR: c0288790 CTR: c02d7020 REGS: ee26ba30 TRAP: 0700 Not tainted (3.16.0-rc1) MSR: 00021002 CR: 44022082 XER: 00000000 GPR00: c0288790 ee26bae0 eebda5d0 0000009f c1de3478 c1de382c 00000000 00021002 GPR08: 00000007 00000000 01660000 0000012f 82022082 00000000 c07a1900 eeda29c0 GPR16: 00000000 c61deea0 000c49a0 00000260 c07e1e10 c0da1180 00029002 c0d9ef08 GPR24: c07a0000 c07a4acc ee26bb38 ee2765c0 00000014 ee130210 00000000 00000014 NIP [c0288790] check_unmap+0x640/0xab0 LR [c0288790] check_unmap+0x640/0xab0 Call Trace: [ee26bae0] [c0288790] check_unmap+0x640/0xab0 (unreliable) [ee26bb30] [c0288c78] debug_dma_unmap_page+0x78/0x90 [ee26bbb0] [f929c3d4] ahash_setkey+0x374/0x720 [caamhash] [ee26bc30] [c022fec8] __test_hash+0x228/0x6c0 [ee26bde0] [c0230388] test_hash+0x28/0xb0 [ee26be00] [c0230458] alg_test_hash+0x48/0xc0 [ee26be20] [c022fa94] alg_test+0x114/0x2e0 [ee26bea0] [c022cd1c] cryptomgr_test+0x4c/0x60 [ee26beb0] [c00497a4] kthread+0xc4/0xe0 [ee26bf40] [c000f2fc] ret_from_kernel_thread+0x5c/0x64 Instruction dump: 41de03e8 83da0020 3c60c06d 83fa0024 3863f520 813b0020 815b0024 80fa0018 811a001c 93c10008 93e1000c 4830cf6d <0fe00000> 3c60c06d 3863f0f4 4830cf5d ---[ end trace db1fae088c75c26c ]--- Mapped at: [] ahash_setkey+0xfc/0x720 [caamhash] [] __test_hash+0x228/0x6c0 [] test_hash+0x28/0xb0 [] alg_test_hash+0x48/0xc0 [] alg_test+0x114/0x2e0 Signed-off-by: Horia Geanta Acked-by: Kim Phillips Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 39f7a933c8fa..9ee4e0478220 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -499,11 +499,11 @@ static int 
hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, digestsize, 1); #endif } - *keylen = digestsize; - dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE); + *keylen = digestsize; + kfree(desc); return ret; -- cgit v1.2.3 From ef62b2310b4c783a70eba78f29695687d8cdc8df Mon Sep 17 00:00:00 2001 From: Horia Geanta Date: Fri, 11 Jul 2014 15:34:51 +0300 Subject: crypto: caam - fix DMA direction mismatch in ahash_done_ctx_dst caam_jr ffe301000.jr: DMA-API: device driver frees DMA memory with different direction [device address=0x00000000062ad1ac] [size=28 bytes] [mapped with DMA_FROM_DEVICE] [unmapped with DMA_TO_DEVICE] ------------[ cut here ]------------ WARNING: at lib/dma-debug.c:1131 Modules linked in: caamhash(+) [last unloaded: caamhash] CPU: 0 PID: 0 Comm: swapper/0 Tainted: G W 3.16.0-rc1 #23 task: c0789380 ti: effd2000 task.ti: c07d6000 NIP: c02885cc LR: c02885cc CTR: c02d7020 REGS: effd3d50 TRAP: 0700 Tainted: G W (3.16.0-rc1) MSR: 00021002 CR: 44048082 XER: 00000000 GPR00: c02885cc effd3e00 c0789380 000000c6 c1de3478 c1de382c 00000000 00021002 GPR08: 00000007 00000000 01660000 0000012f 84048082 00000000 00000018 c07db080 GPR16: 00000006 00000100 0000002c eee567e0 c07e1e10 c0da1180 00029002 c0d96708 GPR24: c07a0000 c07a4acc effd3e58 ee29b140 0000001c ee130210 00000000 c0d96700 NIP [c02885cc] check_unmap+0x47c/0xab0 LR [c02885cc] check_unmap+0x47c/0xab0 Call Trace: [effd3e00] [c02885cc] check_unmap+0x47c/0xab0 (unreliable) [effd3e50] [c0288c78] debug_dma_unmap_page+0x78/0x90 [effd3ed0] [f9350974] ahash_done_ctx_dst+0xa4/0x200 [caamhash] [effd3f00] [c0429640] caam_jr_dequeue+0x1c0/0x280 [effd3f50] [c002c94c] tasklet_action+0xcc/0x1a0 [effd3f80] [c002cb30] __do_softirq+0x110/0x220 [effd3fe0] [c002cf34] irq_exit+0xa4/0xe0 [effd3ff0] [c000d834] call_do_irq+0x24/0x3c [c07d7d50] [c000489c] do_IRQ+0x8c/0x110 [c07d7d70] [c000f86c] ret_from_except+0x0/0x18 --- Exception: 501 at _raw_spin_unlock_irq+0x30/0x50 LR = _raw_spin_unlock_irq+0x2c/0x50 [c07d7e40] [c0053084] finish_task_switch+0x74/0x130 [c07d7e60] [c058f278] __schedule+0x238/0x620 [c07d7f70] [c058fb50] schedule_preempt_disabled+0x10/0x20 [c07d7f80] [c00686a0] cpu_startup_entry+0x100/0x1b0 [c07d7fb0] [c074793c] start_kernel+0x338/0x34c [c07d7ff0] [c00003d8] set_ivor+0x140/0x17c Instruction dump: 7d495214 7d294214 806a0010 80c90010 811a001c 813a0020 815a0024 90610008 3c60c06d 90c1000c 3863f764 4830d131 <0fe00000> 3c60c06d 3863f0f4 4830d121 ---[ end trace db1fae088c75c270 ]--- Mapped at: [] ahash_update_first+0x5b4/0xba0 [caamhash] [] __test_hash+0x288/0x6c0 [] test_hash+0x28/0xb0 [] alg_test_hash+0x94/0xc0 [] alg_test+0x114/0x2e0 Signed-off-by: Horia Geanta Acked-by: Kim Phillips Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 9ee4e0478220..4bb6ff46e339 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -753,7 +753,7 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, if (err) caam_jr_strstatus(jrdev, err); - ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); + ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE); kfree(edesc); #ifdef DEBUG -- cgit v1.2.3 From bc9e05f9e770b9c6a1bcc0cdee676e74e5a04fd2 Mon Sep 17 00:00:00 2001 From: Horia Geanta Date: Fri, 11 Jul 2014 15:34:52 +0300 Subject: crypto: caam - 
fix DMA direction mismatch in ahash_done_ctx_src caam_jr ffe301000.jr: DMA-API: device driver frees DMA memory with different direction [device address=0x0000000006271dac] [size=28 bytes] [mapped with DMA_TO_DEVICE] [unmapped with DMA_FROM_DEVICE] ------------[ cut here ]------------ WARNING: at lib/dma-debug.c:1131 Modules linked in: caamhash(+) [last unloaded: caamhash] CPU: 0 PID: 0 Comm: swapper/0 Tainted: G W 3.16.0-rc1 #23 task: c0789380 ti: effd2000 task.ti: c07d6000 NIP: c02885cc LR: c02885cc CTR: c02d7020 REGS: effd3d50 TRAP: 0700 Tainted: G W (3.16.0-rc1) MSR: 00021002 CR: 44048082 XER: 00000000 GPR00: c02885cc effd3e00 c0789380 000000c6 c1de3478 c1de382c 00000000 00021002 GPR08: 00000007 00000000 01660000 0000012f 84048082 00000000 00000018 c07db080 GPR16: 00000006 00000100 0000002c c62517a0 c07e1e10 c0da1180 00029002 c0d95f88 GPR24: c07a0000 c07a4acc effd3e58 ee322bc0 0000001c ee130210 00000000 c0d95f80 NIP [c02885cc] check_unmap+0x47c/0xab0 LR [c02885cc] check_unmap+0x47c/0xab0 Call Trace: [effd3e00] [c02885cc] check_unmap+0x47c/0xab0 (unreliable) [effd3e50] [c0288c78] debug_dma_unmap_page+0x78/0x90 [effd3ed0] [f9624d84] ahash_done_ctx_src+0xa4/0x200 [caamhash] [effd3f00] [c0429640] caam_jr_dequeue+0x1c0/0x280 [effd3f50] [c002c94c] tasklet_action+0xcc/0x1a0 [effd3f80] [c002cb30] __do_softirq+0x110/0x220 [effd3fe0] [c002cf34] irq_exit+0xa4/0xe0 [effd3ff0] [c000d834] call_do_irq+0x24/0x3c [c07d7d50] [c000489c] do_IRQ+0x8c/0x110 [c07d7d70] [c000f86c] ret_from_except+0x0/0x18 --- Exception: 501 at _raw_spin_unlock_irq+0x30/0x50 LR = _raw_spin_unlock_irq+0x2c/0x50 [c07d7e40] [c0053084] finish_task_switch+0x74/0x130 [c07d7e60] [c058f278] __schedule+0x238/0x620 [c07d7f70] [c058fb50] schedule_preempt_disabled+0x10/0x20 [c07d7f80] [c00686a0] cpu_startup_entry+0x100/0x1b0 [c07d7fb0] [c074793c] start_kernel+0x338/0x34c [c07d7ff0] [c00003d8] set_ivor+0x140/0x17c Instruction dump: 7d495214 7d294214 806a0010 80c90010 811a001c 813a0020 815a0024 90610008 3c60c06d 90c1000c 3863f764 4830d131 <0fe00000> 3c60c06d 3863f0f4 4830d121 ---[ end trace db1fae088c75c280 ]--- Mapped at: [] ahash_final_ctx+0x14c/0x7b0 [caamhash] [] __test_hash+0x2ac/0x6c0 [] test_hash+0x28/0xb0 [] alg_test_hash+0x94/0xc0 [] alg_test+0x114/0x2e0 Signed-off-by: Horia Geanta Acked-by: Kim Phillips Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 4bb6ff46e339..e94dd4518cfb 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -718,7 +718,7 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, if (err) caam_jr_strstatus(jrdev, err); - ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE); kfree(edesc); #ifdef DEBUG -- cgit v1.2.3 From 45e9af78b1abb00b7c394a7ce4e72584c3ca0eb8 Mon Sep 17 00:00:00 2001 From: Horia Geanta Date: Fri, 11 Jul 2014 15:34:53 +0300 Subject: crypto: caam - fix uninitialized S/G table size in ahash_digest Not initializing edesc->sec4_sg_bytes correctly causes ahash_done callback to free unallocated DMA memory: caam_jr ffe301000.jr: DMA-API: device driver tries to free DMA memory it has not allocated [device address=0x300900000000b44d] [size=46158 bytes] WARNING: at lib/dma-debug.c:1080 Modules linked in: caamhash(+) [last unloaded: caamhash] CPU: 0 PID: 1358 Comm: cryptomgr_test Tainted: G W 3.16.0-rc1 #23 
task: eed04250 ti: effd2000 task.ti: c6046000 NIP: c02889fc LR: c02889fc CTR: c02d7020 REGS: effd3d50 TRAP: 0700 Tainted: G W (3.16.0-rc1) MSR: 00029002 CR: 44048082 XER: 00000000 GPR00: c02889fc effd3e00 eed04250 00000091 c1de3478 c1de382c 00000000 00029002 GPR08: 00000007 00000000 01660000 00000000 22048082 00000000 00000018 c07db080 GPR16: 00000006 00000100 0000002c ee2497e0 c07e1e10 c0da1180 00029002 c0d912c8 GPR24: 00000014 ee2497c0 effd3e58 00000000 c078ad4c ee130210 30090000 0000b44d NIP [c02889fc] check_unmap+0x8ac/0xab0 LR [c02889fc] check_unmap+0x8ac/0xab0 Call Trace: [effd3e00] [c02889fc] check_unmap+0x8ac/0xab0 (unreliable) [effd3e50] [c0288c78] debug_dma_unmap_page+0x78/0x90 [effd3ed0] [f9404fec] ahash_done+0x11c/0x190 [caamhash] [effd3f00] [c0429640] caam_jr_dequeue+0x1c0/0x280 [effd3f50] [c002c94c] tasklet_action+0xcc/0x1a0 [effd3f80] [c002cb30] __do_softirq+0x110/0x220 [effd3fe0] [c002cf34] irq_exit+0xa4/0xe0 [effd3ff0] [c000d834] call_do_irq+0x24/0x3c [c6047ae0] [c000489c] do_IRQ+0x8c/0x110 [c6047b00] [c000f86c] ret_from_except+0x0/0x18 --- Exception: 501 at _raw_spin_unlock_irq+0x30/0x50 LR = _raw_spin_unlock_irq+0x2c/0x50 [c6047bd0] [c0590158] wait_for_common+0xb8/0x170 [c6047c10] [c059024c] wait_for_completion_interruptible+0x1c/0x40 [c6047c20] [c022fc78] do_one_async_hash_op.isra.2.part.3+0x18/0x40 [c6047c30] [c022ff98] __test_hash+0x2f8/0x6c0 [c6047de0] [c0230388] test_hash+0x28/0xb0 [c6047e00] [c0230458] alg_test_hash+0x48/0xc0 [c6047e20] [c022fa94] alg_test+0x114/0x2e0 [c6047ea0] [c022cd1c] cryptomgr_test+0x4c/0x60 [c6047eb0] [c00497a4] kthread+0xc4/0xe0 [c6047f40] [c000f2fc] ret_from_kernel_thread+0x5c/0x64 Instruction dump: 41de01c8 80a9002c 2f850000 40fe0008 80a90008 80fa0018 3c60c06d 811a001c 3863f4a4 813a0020 815a0024 4830cd01 <0fe00000> 81340048 2f890000 40feff48 Signed-off-by: Horia Geanta Acked-by: Kim Phillips Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index e94dd4518cfb..126d80c23571 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -1097,6 +1097,7 @@ static int ahash_digest(struct ahash_request *req) } edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN; + edesc->sec4_sg_bytes = sec4_sg_bytes; edesc->src_nents = src_nents; edesc->chained = chained; -- cgit v1.2.3 From 76b99080ccc964163b4567fac2bb8619f5ed789f Mon Sep 17 00:00:00 2001 From: Horia Geanta Date: Fri, 11 Jul 2014 15:34:54 +0300 Subject: crypto: caam - fix uninitialized edesc->dst_dma field dst_dma not being properly initialized causes ahash_done_ctx_dst to try to free unallocated DMA memory: caam_jr ffe301000.jr: DMA-API: device driver tries to free DMA memory it has not allocated [device address=0x0000000006513340] [size=28 bytes] WARNING: at lib/dma-debug.c:1080 Modules linked in: caamhash(+) [last unloaded: caamhash] CPU: 0 PID: 1373 Comm: cryptomgr_test Tainted: G W 3.16.0-rc1 #23 task: ee23e350 ti: effd2000 task.ti: ee1f6000 NIP: c02889fc LR: c02889fc CTR: c02d7020 REGS: effd3d50 TRAP: 0700 Tainted: G W (3.16.0-rc1) MSR: 00029002 CR: 44048082 XER: 00000000 GPR00: c02889fc effd3e00 ee23e350 0000008e c1de3478 c1de382c 00000000 00029002 GPR08: 00000007 00000000 01660000 00000000 24048082 00000000 00000018 c07db080 GPR16: 00000006 00000100 0000002c eeb4a7e0 c07e1e10 c0da1180 00029002 c0d9b3c8 GPR24: eeb4a7c0 00000000 effd3e58 00000000 c078ad4c ee130210 00000000 06513340 NIP 
[c02889fc] check_unmap+0x8ac/0xab0 LR [c02889fc] check_unmap+0x8ac/0xab0 Call Trace: [effd3e00] [c02889fc] check_unmap+0x8ac/0xab0 (unreliable) [effd3e50] [c0288c78] debug_dma_unmap_page+0x78/0x90 [effd3ed0] [f94b89ec] ahash_done_ctx_dst+0x11c/0x200 [caamhash] [effd3f00] [c0429640] caam_jr_dequeue+0x1c0/0x280 [effd3f50] [c002c94c] tasklet_action+0xcc/0x1a0 [effd3f80] [c002cb30] __do_softirq+0x110/0x220 [effd3fe0] [c002cf34] irq_exit+0xa4/0xe0 [effd3ff0] [c000d834] call_do_irq+0x24/0x3c [ee1f7ae0] [c000489c] do_IRQ+0x8c/0x110 [ee1f7b00] [c000f86c] ret_from_except+0x0/0x18 --- Exception: 501 at _raw_spin_unlock_irq+0x30/0x50 LR = _raw_spin_unlock_irq+0x2c/0x50 [ee1f7bd0] [c0590158] wait_for_common+0xb8/0x170 [ee1f7c10] [c059024c] wait_for_completion_interruptible+0x1c/0x40 [ee1f7c20] [c022fc78] do_one_async_hash_op.isra.2.part.3+0x18/0x40 [ee1f7c30] [c022ffb8] __test_hash+0x318/0x6c0 [ee1f7de0] [c0230388] test_hash+0x28/0xb0 [ee1f7e00] [c02304a4] alg_test_hash+0x94/0xc0 [ee1f7e20] [c022fa94] alg_test+0x114/0x2e0 [ee1f7ea0] [c022cd1c] cryptomgr_test+0x4c/0x60 [ee1f7eb0] [c00497a4] kthread+0xc4/0xe0 [ee1f7f40] [c000f2fc] ret_from_kernel_thread+0x5c/0x64 Instruction dump: 41de01c8 80a9002c 2f850000 40fe0008 80a90008 80fa0018 3c60c06d 811a001c 3863f4a4 813a0020 815a0024 4830cd01 <0fe00000> 81340048 2f890000 40feff48 Signed-off-by: Horia Geanta Acked-by: Kim Phillips Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 126d80c23571..44be5ad38ad7 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -1255,6 +1255,7 @@ static int ahash_update_no_ctx(struct ahash_request *req) edesc->sec4_sg_bytes = sec4_sg_bytes; edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN; + edesc->dst_dma = 0; state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf, *buflen); @@ -1454,6 +1455,7 @@ static int ahash_update_first(struct ahash_request *req) edesc->sec4_sg_bytes = sec4_sg_bytes; edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN; + edesc->dst_dma = 0; if (src_nents) { sg_to_sec4_sg_last(req->src, src_nents, -- cgit v1.2.3 From de0e35ec2b72be30892f28a939c358af1df4fa2c Mon Sep 17 00:00:00 2001 From: Horia Geanta Date: Fri, 11 Jul 2014 15:34:55 +0300 Subject: crypto: caam - fix uninitialized state->buf_dma field state->buf_dma not being initialized can cause try_buf_map_to_sec4_sg to try to free unallocated DMA memory: caam_jr ffe301000.jr: DMA-API: device driver tries to free DMA memory it has not allocated [device address=0x000000002eb15068] [size=0 bytes] WARNING: at lib/dma-debug.c:1080 Modules linked in: caamhash(+) [last unloaded: caamhash] CPU: 0 PID: 1387 Comm: cryptomgr_test Tainted: G W 3.16.0-rc1 #23 task: eed24e90 ti: eebd0000 task.ti: eebd0000 NIP: c02889fc LR: c02889fc CTR: c02d7020 REGS: eebd1a50 TRAP: 0700 Tainted: G W (3.16.0-rc1) MSR: 00029002 CR: 44042082 XER: 00000000 GPR00: c02889fc eebd1b00 eed24e90 0000008d c1de3478 c1de382c 00000000 00029002 GPR08: 00000007 00000000 01660000 00000000 24042082 00000000 c07a1900 eeda2a40 GPR16: 005d62a0 c078ad4c 00000000 eeb15068 c07e1e10 c0da1180 00029002 c0d97408 GPR24: c62497a0 00000014 eebd1b58 00000000 c078ad4c ee130210 00000000 2eb15068 NIP [c02889fc] check_unmap+0x8ac/0xab0 LR [c02889fc] check_unmap+0x8ac/0xab0 Call Trace: [eebd1b00] [c02889fc] check_unmap+0x8ac/0xab0 (unreliable) --- Exception: 0 at 
(null) LR = (null) [eebd1b50] [c0288c78] debug_dma_unmap_page+0x78/0x90 (unreliable) [eebd1bd0] [f956f738] ahash_final_ctx+0x6d8/0x7b0 [caamhash] [eebd1c30] [c022ff4c] __test_hash+0x2ac/0x6c0 [eebd1de0] [c0230388] test_hash+0x28/0xb0 [eebd1e00] [c02304a4] alg_test_hash+0x94/0xc0 [eebd1e20] [c022fa94] alg_test+0x114/0x2e0 [eebd1ea0] [c022cd1c] cryptomgr_test+0x4c/0x60 [eebd1eb0] [c00497a4] kthread+0xc4/0xe0 [eebd1f40] [c000f2fc] ret_from_kernel_thread+0x5c/0x64 Instruction dump: 41de01c8 80a9002c 2f850000 40fe0008 80a90008 80fa0018 3c60c06d 811a001c 3863f4a4 813a0020 815a0024 4830cd01 <0fe00000> 81340048 2f890000 40feff48 Signed-off-by: Horia Geanta Acked-by: Kim Phillips Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 44be5ad38ad7..b464d03ebf40 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -1536,6 +1536,7 @@ static int ahash_init(struct ahash_request *req) state->final = ahash_final_no_ctx; state->current_buf = 0; + state->buf_dma = 0; return 0; } -- cgit v1.2.3 From a60384dfff3df41e86431974e5f9807773332d9e Mon Sep 17 00:00:00 2001 From: Horia Geanta Date: Fri, 11 Jul 2014 15:46:58 +0300 Subject: crypto: caam - set DK (Decrypt Key) bit only for AES accelerator AES currently shares descriptor creation functions with DES and 3DES. The DK bit is set in all cases; however, it is valid only for the AES accelerator. Signed-off-by: Horia Geanta Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 694df4685920..a80ea853701d 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -97,6 +97,13 @@ static inline void append_dec_op1(u32 *desc, u32 type) { u32 *jump_cmd, *uncond_jump_cmd; + /* DK bit is valid only for AES */ + if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) { + append_operation(desc, type | OP_ALG_AS_INITFINAL | + OP_ALG_DECRYPT); + return; + } + jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); append_operation(desc, type | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT); -- cgit v1.2.3 From 71d932d99d7e6c644172e1d686bb41d667d8fa37 Mon Sep 17 00:00:00 2001 From: Chen Gang Date: Sun, 13 Jul 2014 11:01:38 +0800 Subject: crypto: qce - Let 'DEV_QCE' depend on both HAS_DMA and HAS_IOMEM 'DEV_QCE' needs both HAS_DMA and HAS_IOMEM, so let it depend on them. The related error (with allmodconfig under the score architecture): MODPOST 1365 modules ERROR: "devm_ioremap_resource" [drivers/crypto/qce/qcrypto.ko] undefined! ERROR: "dma_map_sg" [drivers/crypto/qce/qcrypto.ko] undefined! ERROR: "dma_set_mask" [drivers/crypto/qce/qcrypto.ko] undefined! ERROR: "dma_supported" [drivers/crypto/qce/qcrypto.ko] undefined! ERROR: "dma_unmap_sg" [drivers/crypto/qce/qcrypto.ko] undefined!
Signed-off-by: Chen Gang Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 5ef9ec9d55b3..2fb0fdfc87df 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -422,7 +422,7 @@ source "drivers/crypto/qat/Kconfig" config CRYPTO_DEV_QCE tristate "Qualcomm crypto engine accelerator" - depends on ARCH_QCOM || COMPILE_TEST + depends on (ARCH_QCOM || COMPILE_TEST) && HAS_DMA && HAS_IOMEM select CRYPTO_AES select CRYPTO_DES select CRYPTO_ECB -- cgit v1.2.3 From 8f1da7b945b65513fb02b75ce25040c67ce32726 Mon Sep 17 00:00:00 2001 From: Horia Geanta Date: Mon, 21 Jul 2014 16:03:21 +0300 Subject: crypto: caam - fix DECO RSR polling RSR (Request Source Register) is not used when virtualization is disabled, thus don't poll for the Valid bit. Besides this, if it is used, the timeout has to be reinitialized. Signed-off-by: Horia Geanta Acked-by: Kim Phillips Signed-off-by: Herbert Xu --- drivers/crypto/caam/ctrl.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index cedb56500b61..3cade79ea41e 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -89,12 +89,15 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc, /* Set the bit to request direct access to DECO0 */ topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; - if (ctrlpriv->virt_en == 1) + if (ctrlpriv->virt_en == 1) { setbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0); - while (!(rd_reg32(&topregs->ctrl.deco_rsr) & DECORSR_VALID) && - --timeout) - cpu_relax(); + while (!(rd_reg32(&topregs->ctrl.deco_rsr) & DECORSR_VALID) && + --timeout) + cpu_relax(); + + timeout = 100000; + } setbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE); -- cgit v1.2.3 From 4839ddcaba266bcf37dfb00757f339ab9de4710e Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Wed, 23 Jul 2014 09:45:19 -0500 Subject: crypto: ccp - Remove "select OF" from Kconfig The addition of the "select OF if ARM64" has led to a Kconfig recursive dependency error when "make ARCH=sh rsk7269_defconfig" was run. Since OF is selected by ARM64 and of_property_read_bool() is defined no matter what, delete the Kconfig line that selects OF. Reported-by: kbuild test robot Signed-off-by: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/ccp/Kconfig | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 474382d50ec4..7639ffc36c68 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -3,7 +3,6 @@ config CRYPTO_DEV_CCP_DD depends on CRYPTO_DEV_CCP default m select HW_RANDOM - select OF if ARM64 help Provides the interface to use the AMD Cryptographic Coprocessor which can be used to accelerate or offload encryption operations -- cgit v1.2.3 From 341b2a3541c21a2c60daf87bcd6d6e8c4f1aed62 Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Fri, 25 Jul 2014 15:55:09 -0700 Subject: crypto: qat - remove unnecessary return codes Remove unnecessary return code variables and change function types accordingly.
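A sketch of the transformation, with a hypothetical helper rather than the actual qat code: when the status variable can only ever hold 0, the function becomes void and the dead variable disappears.

	/* before: hypothetical helper; 'status' is never set to anything but 0 */
	static int load_page(struct ustore_page *page)
	{
		int status = 0;

		copy_page_words(page);
		return status;
	}

	/* after: return type changed to void, dead variable dropped */
	static void load_page(struct ustore_page *page)
	{
		copy_page_words(page);
	}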
Signed-off-by: Bruce Allan Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/qat_uclo.c | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index dd4e0d3c323a..f22b48889d33 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -1088,14 +1088,13 @@ static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle, *uword = fill; } -static int qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle, - struct icp_qat_uclo_encap_page - *encap_page, unsigned int ae) +static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_uclo_encap_page + *encap_page, unsigned int ae) { unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen; struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; uint64_t fill_pat; - int status = 0; /* load the page starting at appropriate ustore address */ /* get fill-pattern from an image -- they are all the same */ @@ -1126,18 +1125,15 @@ static int qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle, uw_relative_addr += cpylen; words_num -= cpylen; } - return status; } -static int -qat_uclo_wr_uimage_pages(struct icp_qat_fw_loader_handle *handle, - struct icp_qat_uof_image *image) +static void qat_uclo_wr_uimage_pages(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_uof_image *image) { struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; unsigned int ctx_mask, s; struct icp_qat_uclo_page *page; unsigned char ae; - int retval = 0; int ctx; if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX) @@ -1160,8 +1156,7 @@ qat_uclo_wr_uimage_pages(struct icp_qat_fw_loader_handle *handle, page = obj_handle->ae_data[ae].ae_slices[s].page; if (!page->encap_page->def_page) continue; - if (qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae)) - return -EINVAL; + qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae); page = obj_handle->ae_data[ae].ae_slices[s].page; for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) @@ -1172,7 +1167,6 @@ qat_uclo_wr_uimage_pages(struct icp_qat_fw_loader_handle *handle, qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned, image->entry_address); } - return retval; } int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle) @@ -1187,9 +1181,8 @@ int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle) return -EINVAL; if (qat_uclo_init_ustore(handle, &(obj_handle->ae_uimage[i]))) return -EINVAL; - if (qat_uclo_wr_uimage_pages(handle, - obj_handle->ae_uimage[i].img_ptr)) - return -EINVAL; + qat_uclo_wr_uimage_pages(handle, + obj_handle->ae_uimage[i].img_ptr); } return 0; } -- cgit v1.2.3 From 53275baa0368945ba4f385f2e41918bedd847dce Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Fri, 25 Jul 2014 15:55:15 -0700 Subject: crypto: qat - checkpatch blank lines Fix new checkpatch hits: CHECK:LINE_SPACING: Please use a blank line after function/struct/union/enum declarations Signed-off-by: Bruce Allan Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/adf_transport_internal.h | 3 +++ drivers/crypto/qat/qat_common/icp_qat_fw_la.h | 1 + 2 files changed, 4 insertions(+) (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h 
index 4eb8969243bf..f854bac276b0 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_internal.h +++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h @@ -104,12 +104,15 @@ static inline int adf_bank_debugfs_add(struct adf_etr_bank_data *bank) { return 0; } + #define adf_bank_debugfs_rm(bank) do {} while (0) + static inline int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name) { return 0; } + #define adf_ring_debugfs_rm(ring) do {} while (0) #endif #endif diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h index d1d4802a857f..c8d26697e8ea 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h @@ -358,6 +358,7 @@ struct icp_qat_fw_la_resp { uint64_t opaque_data; uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; }; + #define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \ ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) -- cgit v1.2.3 From a7d217617b8528fcf92274be03f20ff2f5ec3dc4 Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Fri, 25 Jul 2014 15:55:21 -0700 Subject: crypto: qat - remove unneeded header Remove include of a no longer necessary header file. Signed-off-by: Bruce Allan Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/adf_cfg.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c index 42790760ade1..aba7f1d043fb 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg.c +++ b/drivers/crypto/qat/qat_common/adf_cfg.c @@ -49,7 +49,6 @@ #include #include #include "adf_accel_devices.h" -#include "adf_common_drv.h" #include "adf_cfg.h" static DEFINE_MUTEX(qat_cfg_read_lock); -- cgit v1.2.3 From 45cff2608007ab28047cadb33e85b58c40b447ce Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Fri, 25 Jul 2014 15:55:26 -0700 Subject: crypto: qat - remove unnecessary parentheses Resolve new strict checkpatch hits CHECK:UNNECESSARY_PARENTHESES: Unnecessary parentheses around ... Signed-off-by: Bruce Allan Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/icp_qat_hw.h | 6 ++-- drivers/crypto/qat/qat_common/qat_algs.c | 2 +- drivers/crypto/qat/qat_common/qat_uclo.c | 54 +++++++++++++++--------------- 3 files changed, 31 insertions(+), 31 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/qat/qat_common/icp_qat_hw.h index cc7ec4071929..5031f8c10d75 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_hw.h +++ b/drivers/crypto/qat/qat_common/icp_qat_hw.h @@ -120,14 +120,14 @@ struct icp_qat_hw_auth_config { #define QAT_AUTH_ALGO_SHA3_BITPOS 22 #define QAT_AUTH_ALGO_SHA3_MASK 0x3 #define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \ - ((((mode) & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \ - (((algo) & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \ + (((mode & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \ + ((algo & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \ (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \ QAT_AUTH_ALGO_SHA3_BITPOS) | \ (((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \ (algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 
1 : 0) \ & QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \ - (((cmp_len) & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS)) + ((cmp_len & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS)) struct icp_qat_hw_auth_counter { __be32 counter; diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 946686f83660..59df48872955 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -759,7 +759,7 @@ void qat_alg_callback(void *resp) qat_alg_free_bufl(inst, qat_req); if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK)) res = -EBADMSG; - areq->base.complete(&(areq->base), res); + areq->base.complete(&areq->base, res); } static int qat_alg_dec(struct aead_request *areq) diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index f22b48889d33..17a9954f831e 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -79,11 +79,11 @@ static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle, } else { ae_slice->ctx_mask_assigned = 0; } - ae_slice->regions = kzalloc(sizeof(*(ae_slice->regions)), GFP_KERNEL); - if (!(ae_slice->regions)) + ae_slice->regions = kzalloc(sizeof(*ae_slice->regions), GFP_KERNEL); + if (!ae_slice->regions) return -ENOMEM; - ae_slice->page = kzalloc(sizeof(*(ae_slice->page)), GFP_KERNEL); - if (!(ae_slice->page)) + ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL); + if (!ae_slice->page) goto out_err; page = ae_slice->page; page->encap_page = encap_image->page; @@ -248,7 +248,7 @@ static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle, pr_err("QAT: Memory scope for init_mem error\n"); return -EINVAL; } - str = qat_uclo_get_string(&(obj_handle->str_table), init_mem->sym_name); + str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name); if (!str) { pr_err("QAT: AE name assigned in uof init table is NULL\n"); return -EINVAL; @@ -257,7 +257,7 @@ static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle, pr_err("QAT: Parse num for AE number failed\n"); return -EINVAL; } - if (!test_bit(*ae, (unsigned long *)&(handle->hal_handle->ae_mask))) { + if (!test_bit(*ae, (unsigned long *)&handle->hal_handle->ae_mask)) { pr_err("QAT: ae %d to be init is fused off\n", *ae); return -EINVAL; } @@ -332,7 +332,7 @@ static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle, ICP_QAT_UCLO_MAX_LMEM_REG, &ae)) return -EINVAL; if (qat_uclo_create_batch_init_list(handle, init_mem, ae, - &(obj_handle->lm_init_tab[ae]))) + &obj_handle->lm_init_tab[ae])) return -EINVAL; return 0; } @@ -347,7 +347,7 @@ static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle, if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae)) return -EINVAL; if (qat_uclo_create_batch_init_list(handle, init_mem, ae, - &(obj_handle->umem_init_tab[ae]))) + &obj_handle->umem_init_tab[ae])) return -EINVAL; /* set the highest ustore address referenced */ uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2; @@ -425,7 +425,7 @@ static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle, page = image->page; for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) { - if (!test_bit(ae, (unsigned long *)&(uof_image->ae_assigned))) + if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned)) continue; ustore_size = obj_handle->ae_data[ae].eff_ustore_size; patt_pos = page->beg_addr_p + page->micro_words_num; @@ -486,8 
+486,8 @@ static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr, for (i = 0; i < obj_hdr->num_chunks; i++) { if ((cur < (void *)&chunk_hdr[i]) && - !(strncmp(chunk_hdr[i].chunk_id, chunk_id, - ICP_QAT_UOF_OBJID_LEN))) { + !strncmp(chunk_hdr[i].chunk_id, chunk_id, + ICP_QAT_UOF_OBJID_LEN)) { return &chunk_hdr[i]; } } @@ -532,8 +532,8 @@ qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr, file_chunk = (struct icp_qat_uof_filechunkhdr *) (buf + sizeof(struct icp_qat_uof_filehdr)); for (i = 0; i < file_hdr->num_chunks; i++) { - if (!(strncmp(file_chunk->chunk_id, chunk_id, - ICP_QAT_UOF_OBJID_LEN))) { + if (!strncmp(file_chunk->chunk_id, chunk_id, + ICP_QAT_UOF_OBJID_LEN)) { chunk = buf + file_chunk->offset; if (file_chunk->checksum != qat_uclo_calc_str_checksum( (char *)chunk, file_chunk->size)) @@ -692,12 +692,12 @@ static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae) struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; for (ae = 0; ae <= max_ae; ae++) { - if (!test_bit(ae, (unsigned long *) - &(handle->hal_handle->ae_mask))) + if (!test_bit(ae, + (unsigned long *)&handle->hal_handle->ae_mask)) continue; for (i = 0; i < obj_handle->uimage_num; i++) { if (!test_bit(ae, (unsigned long *) - &(obj_handle->ae_uimage[i].img_ptr->ae_assigned))) + &obj_handle->ae_uimage[i].img_ptr->ae_assigned)) continue; mflag = 1; if (qat_uclo_init_ae_data(obj_handle, ae, i)) @@ -898,12 +898,12 @@ static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle) for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) { if (!test_bit(ae, - (unsigned long *)&(handle->hal_handle->ae_mask))) + (unsigned long *)&handle->hal_handle->ae_mask)) continue; - ae_data = &(obj_handle->ae_data[ae]); + ae_data = &obj_handle->ae_data[ae]; for (s = 0; s < ae_data->slice_num && s < ICP_QAT_UCLO_MAX_CTX; s++) { - if (!(obj_handle->ae_data[ae].ae_slices[s].encap_image)) + if (!obj_handle->ae_data[ae].ae_slices[s].encap_image) continue; uof_image = ae_data->ae_slices[s].encap_image->img_ptr; if (qat_hal_set_ae_ctx_mode(handle, ae, @@ -968,9 +968,9 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) return -EINVAL; } obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE; - if (!(obj_handle->obj_hdr->file_buff) || - !(qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT, - &(obj_handle->str_table)))) { + if (!obj_handle->obj_hdr->file_buff || + !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT, + &obj_handle->str_table)) { pr_err("QAT: uof doesn't have effective images\n"); goto out_err; } @@ -984,8 +984,8 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) goto out_check_uof_aemask_err; } qat_uclo_init_uword_num(handle); - qat_uclo_map_initmem_table(&(obj_handle->encap_uof_obj), - &(obj_handle->init_mem_tab)); + qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj, + &obj_handle->init_mem_tab); if (qat_uclo_set_ae_mode(handle)) goto out_check_uof_aemask_err; return 0; @@ -1143,7 +1143,7 @@ static void qat_uclo_wr_uimage_pages(struct icp_qat_fw_loader_handle *handle, /* load the default page and set assigned CTX PC * to the entrypoint address */ for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) { - if (!test_bit(ae, (unsigned long *)&(image->ae_assigned))) + if (!test_bit(ae, (unsigned long *)&image->ae_assigned)) continue; /* find the slice to which this image is assigned */ for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) { @@ -1177,9 +1177,9 @@ int 
qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle) if (qat_uclo_init_globals(handle)) return -EINVAL; for (i = 0; i < obj_handle->uimage_num; i++) { - if (!(obj_handle->ae_uimage[i].img_ptr)) + if (!obj_handle->ae_uimage[i].img_ptr) return -EINVAL; - if (qat_uclo_init_ustore(handle, &(obj_handle->ae_uimage[i]))) + if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i])) return -EINVAL; qat_uclo_wr_uimage_pages(handle, obj_handle->ae_uimage[i].img_ptr); -- cgit v1.2.3 From df0088f50712f9540769c542d0b4d27acc4fc527 Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Fri, 25 Jul 2014 15:55:32 -0700 Subject: crypto: qat - use min_t macro prefer min_t() macro over two open-coded logical tests Signed-off-by: Bruce Allan Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/qat_uclo.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index 17a9954f831e..b1a16eb5a36d 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -901,8 +901,8 @@ static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle) (unsigned long *)&handle->hal_handle->ae_mask)) continue; ae_data = &obj_handle->ae_data[ae]; - for (s = 0; s < ae_data->slice_num && s < ICP_QAT_UCLO_MAX_CTX; - s++) { + for (s = 0; s < min_t(unsigned int, ae_data->slice_num, + ICP_QAT_UCLO_MAX_CTX); s++) { if (!obj_handle->ae_data[ae].ae_slices[s].encap_image) continue; uof_image = ae_data->ae_slices[s].encap_image->img_ptr; -- cgit v1.2.3 From 8c1f8e3bbf60d0d06190be81f55d5199d52a463f Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Fri, 25 Jul 2014 15:55:41 -0700 Subject: crypto: qat - change slice->regions to slice->region Change ptr name slice->regions to slice->region to reflect the same in the page struct. 
Signed-off-by: Pingchao Yang Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/icp_qat_uclo.h | 2 +- drivers/crypto/qat/qat_common/qat_uclo.c | 22 +++++++++++----------- 2 files changed, 12 insertions(+), 12 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h index 120fbce517a9..2132a8cbc4ec 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_uclo.h +++ b/drivers/crypto/qat/qat_common/icp_qat_uclo.h @@ -124,7 +124,7 @@ struct icp_qat_uclo_region { }; struct icp_qat_uclo_aeslice { - struct icp_qat_uclo_region *regions; + struct icp_qat_uclo_region *region; struct icp_qat_uclo_page *page; struct icp_qat_uclo_page *cur_page[ICP_QAT_UCLO_MAX_CTX]; struct icp_qat_uclo_encapme *encap_image; diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index b1a16eb5a36d..557fa606da89 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -79,37 +79,37 @@ static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle, } else { ae_slice->ctx_mask_assigned = 0; } - ae_slice->regions = kzalloc(sizeof(*ae_slice->regions), GFP_KERNEL); - if (!ae_slice->regions) + ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL); + if (!ae_slice->region) return -ENOMEM; ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL); if (!ae_slice->page) goto out_err; page = ae_slice->page; page->encap_page = encap_image->page; - ae_slice->page->region = ae_slice->regions; + ae_slice->page->region = ae_slice->region; ae_data->slice_num++; return 0; out_err: - kfree(ae_slice->regions); - ae_slice->regions = NULL; + kfree(ae_slice->region); + ae_slice->region = NULL; return -ENOMEM; } static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data) { - unsigned int ss = 0; + unsigned int i; if (!ae_data) { pr_err("QAT: bad argument, ae_data is NULL\n "); return -EINVAL; } - for (ss = 0; ss < ae_data->slice_num; ss++) { - kfree(ae_data->ae_slices[ss].regions); - ae_data->ae_slices[ss].regions = NULL; - kfree(ae_data->ae_slices[ss].page); - ae_data->ae_slices[ss].page = NULL; + for (i = 0; i < ae_data->slice_num; i++) { + kfree(ae_data->ae_slices[i].region); + ae_data->ae_slices[i].region = NULL; + kfree(ae_data->ae_slices[i].page); + ae_data->ae_slices[i].page = NULL; } return 0; } -- cgit v1.2.3 From 9a147cb3232fd8dbd44ed4628c6c0d05033d4c61 Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Fri, 25 Jul 2014 15:55:46 -0700 Subject: crypto: qat - change ae_num to ae_id Change the logic for how acceleration engines are indexed to make it easier to read. Also, some return code values were updated to better reflect what failed.
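The indexing change amounts to storing a count ("number of AEs") instead of a highest id, so loops take the idiomatic exclusive form; a simplified illustration, not the driver's exact code:

	/* simplified illustration of the id-to-count convention */
	unsigned int max_en_ae_id = 7;              /* highest enabled engine id */
	unsigned int ae_max_num = max_en_ae_id + 1; /* number of engines to walk */
	unsigned int ae;

	for (ae = 0; ae < ae_max_num; ae++) {
		/* per-engine setup, replacing the old inclusive 'ae <= max' form */
	}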
Signed-off-by: Pingchao Yang Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/qat_hal.c | 26 ++++++------- drivers/crypto/qat/qat_common/qat_uclo.c | 67 +++++++++++++++----------------- 2 files changed, 45 insertions(+), 48 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c index 28da876ee268..da9626b6b6b4 100644 --- a/drivers/crypto/qat/qat_common/qat_hal.c +++ b/drivers/crypto/qat/qat_common/qat_hal.c @@ -424,7 +424,7 @@ static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle) SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl & (~MC_TIMESTAMP_ENABLE)); - for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) { + for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { if (!(handle->hal_handle->ae_mask & (1 << ae))) continue; qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0); @@ -492,7 +492,7 @@ int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle) goto out_err; /* Set undefined power-up/reset states to reasonable default values */ - for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) { + for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { if (!(handle->hal_handle->ae_mask & (1 << ae))) continue; qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, @@ -608,7 +608,7 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) unsigned int savctx = 0; int ret = 0; - for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) { + for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { if (!(handle->hal_handle->ae_mask & (1 << ae))) continue; for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) { @@ -637,7 +637,7 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0); qat_hal_enable_ctx(handle, ae, ctx_mask); } - for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) { + for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { if (!(handle->hal_handle->ae_mask & (1 << ae))) continue; /* wait for AE to finish */ @@ -674,17 +674,16 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) #define ICP_DH895XCC_PMISC_BAR 1 int qat_hal_init(struct adf_accel_dev *accel_dev) { - unsigned char ae = 0; - unsigned int csr_val = 0; - unsigned int max_en_ae_num = 0; - struct icp_qat_fw_loader_handle *handle = NULL; + unsigned char ae; + unsigned int max_en_ae_id = 0; + struct icp_qat_fw_loader_handle *handle; struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev; struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct adf_bar *bar = &pci_info->pci_bars[ADF_DH895XCC_PMISC_BAR]; handle = kzalloc(sizeof(*handle), GFP_KERNEL); if (!handle) - goto out_handle; + return -ENOMEM; handle->hal_cap_g_ctl_csr_addr_v = bar->virt_addr + ICP_DH895XCC_CAP_OFFSET; @@ -713,9 +712,9 @@ int qat_hal_init(struct adf_accel_dev *accel_dev) handle->hal_handle->max_ustore; handle->hal_handle->aes[ae].live_ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX; - max_en_ae_num = ae; + max_en_ae_id = ae; } - handle->hal_handle->ae_max_num = max_en_ae_num; + handle->hal_handle->ae_max_num = max_en_ae_id + 1; /* take all AEs out of reset */ if (qat_hal_clr_reset(handle)) { pr_err("QAT: qat_hal_clr_reset error\n"); @@ -724,7 +723,9 @@ int qat_hal_init(struct adf_accel_dev *accel_dev) if (qat_hal_clear_gpr(handle)) goto out_err; /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */ - for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) { + for (ae = 0; ae < 
handle->hal_handle->ae_max_num; ae++) { + unsigned int csr_val = 0; + if (!(hw_data->ae_mask & (1 << ae))) continue; qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val); @@ -738,7 +739,6 @@ out_err: kfree(handle->hal_handle); out_hal_handle: kfree(handle); -out_handle: return -EFAULT; } diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index 557fa606da89..ebd5da03dc71 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -214,11 +214,10 @@ qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle, static int qat_uclo_parse_num(char *str, unsigned int *num) { - char buf[16]; + char buf[16] = {0}; unsigned long ae = 0; int i; - memset(buf, '\0', 16); strncpy(buf, str, 15); for (i = 0; i < 16; i++) { if (!isdigit(buf[i])) { @@ -418,13 +417,13 @@ static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle, fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t), GFP_KERNEL); if (!fill_data) - return -EFAULT; + return -ENOMEM; for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++) memcpy(&fill_data[i], &uof_image->fill_pattern, sizeof(uint64_t)); page = image->page; - for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) { + for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned)) continue; ustore_size = obj_handle->ae_data[ae].eff_ustore_size; @@ -442,11 +441,9 @@ static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle, static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle) { - unsigned int i; - int status = 0; + int i, ae; struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem; - int ae; for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) { if (initmem->num_in_bytes) { @@ -473,7 +470,7 @@ static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle) &obj_handle-> umem_init_tab[ae]); } - return status; + return 0; } static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr, @@ -526,7 +523,7 @@ qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr, { struct icp_qat_uof_filechunkhdr *file_chunk; struct icp_qat_uclo_objhdr *obj_hdr; - void *chunk; + char *chunk; int i; file_chunk = (struct icp_qat_uof_filechunkhdr *) @@ -536,7 +533,7 @@ qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr, ICP_QAT_UOF_OBJID_LEN)) { chunk = buf + file_chunk->offset; if (file_chunk->checksum != qat_uclo_calc_str_checksum( - (char *)chunk, file_chunk->size)) + chunk, file_chunk->size)) break; obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL); if (!obj_hdr) @@ -595,7 +592,7 @@ qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj, return 0; } -static void qat_uclo_map_image_pages(struct icp_qat_uof_encap_obj +static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj *encap_uof_obj, struct icp_qat_uof_image *img, struct icp_qat_uclo_encap_page *page) @@ -631,7 +628,7 @@ static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle, struct icp_qat_uclo_encapme *ae_uimage, int max_image) { - int a = 0, i; + int i, j; struct icp_qat_uof_chunkhdr *chunk_hdr = NULL; struct icp_qat_uof_image *image; struct icp_qat_uof_objtable *ae_regtab; @@ -640,7 +637,7 @@ static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle, struct icp_qat_uof_encap_obj *encap_uof_obj = &obj_handle->encap_uof_obj; - for (a = 0; a < max_image; a++) 
{ + for (j = 0; j < max_image; j++) { chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr, ICP_QAT_UOF_IMAG, chunk_hdr); if (!chunk_hdr) @@ -650,37 +647,37 @@ static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle, ae_regtab = (struct icp_qat_uof_objtable *) (image->reg_tab_offset + obj_handle->obj_hdr->file_buff); - ae_uimage[a].ae_reg_num = ae_regtab->entry_num; - ae_uimage[a].ae_reg = (struct icp_qat_uof_ae_reg *) + ae_uimage[j].ae_reg_num = ae_regtab->entry_num; + ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *) (((char *)ae_regtab) + sizeof(struct icp_qat_uof_objtable)); init_reg_sym_tab = (struct icp_qat_uof_objtable *) (image->init_reg_sym_tab + obj_handle->obj_hdr->file_buff); - ae_uimage[a].init_regsym_num = init_reg_sym_tab->entry_num; - ae_uimage[a].init_regsym = (struct icp_qat_uof_init_regsym *) + ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num; + ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *) (((char *)init_reg_sym_tab) + sizeof(struct icp_qat_uof_objtable)); sbreak_tab = (struct icp_qat_uof_objtable *) (image->sbreak_tab + obj_handle->obj_hdr->file_buff); - ae_uimage[a].sbreak_num = sbreak_tab->entry_num; - ae_uimage[a].sbreak = (struct icp_qat_uof_sbreak *) + ae_uimage[j].sbreak_num = sbreak_tab->entry_num; + ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *) (((char *)sbreak_tab) + sizeof(struct icp_qat_uof_objtable)); - ae_uimage[a].img_ptr = image; + ae_uimage[j].img_ptr = image; if (qat_uclo_check_image_compat(encap_uof_obj, image)) goto out_err; - ae_uimage[a].page = + ae_uimage[j].page = kzalloc(sizeof(struct icp_qat_uclo_encap_page), GFP_KERNEL); - if (!ae_uimage[a].page) + if (!ae_uimage[j].page) goto out_err; - qat_uclo_map_image_pages(encap_uof_obj, image, - ae_uimage[a].page); + qat_uclo_map_image_page(encap_uof_obj, image, + ae_uimage[j].page); } - return a; + return j; out_err: - for (i = 0; i < a; i++) + for (i = 0; i < j; i++) kfree(ae_uimage[i].page); return 0; } @@ -875,7 +872,7 @@ static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle) return -EINVAL; } } - for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) { + for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) { if (!obj_handle->ae_data[ae].ae_slices[s].encap_image) continue; @@ -896,7 +893,7 @@ static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle) struct icp_qat_uclo_aedata *ae_data; struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; - for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) { + for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { if (!test_bit(ae, (unsigned long *)&handle->hal_handle->ae_mask)) continue; @@ -1041,7 +1038,7 @@ out_objbuf_err: void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle) { struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; - int a; + unsigned int a; if (!obj_handle) return; @@ -1050,7 +1047,7 @@ void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle) for (a = 0; a < obj_handle->uimage_num; a++) kfree(obj_handle->ae_uimage[a].page); - for (a = 0; a <= (int)handle->hal_handle->ae_max_num; a++) + for (a = 0; a < handle->hal_handle->ae_max_num; a++) qat_uclo_free_ae_data(&obj_handle->ae_data[a]); kfree(obj_handle->obj_hdr); @@ -1127,8 +1124,8 @@ static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle, } } -static void qat_uclo_wr_uimage_pages(struct icp_qat_fw_loader_handle *handle, - struct icp_qat_uof_image *image) +static void 
qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_uof_image *image) { struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; unsigned int ctx_mask, s; @@ -1142,7 +1139,7 @@ static void qat_uclo_wr_uimage_pages(struct icp_qat_fw_loader_handle *handle, ctx_mask = 0x55; /* load the default page and set assigned CTX PC * to the entrypoint address */ - for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) { + for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { if (!test_bit(ae, (unsigned long *)&image->ae_assigned)) continue; /* find the slice to which this image is assigned */ @@ -1181,8 +1178,8 @@ int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle) return -EINVAL; if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i])) return -EINVAL; - qat_uclo_wr_uimage_pages(handle, - obj_handle->ae_uimage[i].img_ptr); + qat_uclo_wr_uimage_page(handle, + obj_handle->ae_uimage[i].img_ptr); } return 0; } -- cgit v1.2.3 From 689917211cb9d4ca6f90765eeb228ac2727f5dbc Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Fri, 25 Jul 2014 15:55:52 -0700 Subject: crypto: qat - Updated print outputs Updated pr_err output to make it more consistent. Signed-off-by: Pingchao Yang Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/adf_accel_engine.c | 4 ++-- drivers/crypto/qat/qat_common/adf_init.c | 2 +- drivers/crypto/qat/qat_common/qat_hal.c | 3 +-- drivers/crypto/qat/qat_common/qat_uclo.c | 18 +++++++++--------- 4 files changed, 13 insertions(+), 14 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c index 25801fe3c375..c77453b900a3 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_engine.c +++ b/drivers/crypto/qat/qat_common/adf_accel_engine.c @@ -67,11 +67,11 @@ int adf_ae_fw_load(struct adf_accel_dev *accel_dev) uof_size = loader_data->uof_fw->size; uof_addr = (void *)loader_data->uof_fw->data; if (qat_uclo_map_uof_obj(loader_data->fw_loader, uof_addr, uof_size)) { - pr_err("QAT: Failed to map uof\n"); + pr_err("QAT: Failed to map UOF\n"); goto out_err; } if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) { - pr_err("QAT: Failed to map uof\n"); + pr_err("QAT: Failed to map UOF\n"); goto out_err; } return 0; diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c index 5c3d6f12951a..5c0e47a00a87 100644 --- a/drivers/crypto/qat/qat_common/adf_init.c +++ b/drivers/crypto/qat/qat_common/adf_init.c @@ -136,7 +136,7 @@ int adf_dev_start(struct adf_accel_dev *accel_dev) set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status); if (adf_ae_fw_load(accel_dev)) { - pr_err("Failed to load acceleration FW\n"); + pr_err("QAT: Failed to load acceleration FW\n"); adf_ae_fw_release(accel_dev); return -EFAULT; } diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c index da9626b6b6b4..8d686a2edde6 100644 --- a/drivers/crypto/qat/qat_common/qat_hal.c +++ b/drivers/crypto/qat/qat_common/qat_hal.c @@ -849,8 +849,7 @@ static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle, unsigned int ctxarb_ctl, ctx_enables; if ((inst_num > handle->hal_handle->max_ustore) || !micro_inst) { - pr_err("QAT: invalid instructs inst_num=%d, micro_inst=0x%p\n ", - inst_num, micro_inst); + pr_err("QAT: invalid instruction num %d\n", inst_num); return -EINVAL; } /* save current context */ diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c 
b/drivers/crypto/qat/qat_common/qat_uclo.c index ebd5da03dc71..258009178031 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -132,7 +132,7 @@ static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr) return -EINVAL; } if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) { - pr_err("QAT: bad uof version, major 0x%x, minor 0x%x\n", + pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n", maj, min); return -EINVAL; } @@ -249,7 +249,7 @@ static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle, } str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name); if (!str) { - pr_err("QAT: AE name assigned in uof init table is NULL\n"); + pr_err("QAT: AE name assigned in UOF init table is NULL\n"); return -EINVAL; } if (qat_uclo_parse_num(str, ae)) { @@ -751,14 +751,14 @@ static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle) unsigned int maj_ver, prod_type = obj_handle->prod_type; if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->cpu_type)) { - pr_err("QAT: uof type 0x%x not match with cur platform 0x%x\n", + pr_err("QAT: UOF type 0x%x not match with cur platform 0x%x\n", obj_handle->encap_uof_obj.obj_hdr->cpu_type, prod_type); return -EINVAL; } maj_ver = obj_handle->prod_rev & 0xff; if ((obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver) || (obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver)) { - pr_err("QAT: uof majVer 0x%x out of range\n", maj_ver); + pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver); return -EINVAL; } return 0; @@ -868,7 +868,7 @@ static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle) return 0; if (obj_handle->init_mem_tab.entry_num) { if (qat_uclo_init_memory(handle)) { - pr_err("QAT: initalize memory failed\n"); + pr_err("QAT: initialize memory failed\n"); return -EINVAL; } } @@ -917,13 +917,13 @@ static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle) if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, (char)ICP_QAT_LOC_MEM0_MODE (uof_image->ae_mode))) { - pr_err("QAT: qat_hal_set_ae_lm_mode error\n "); + pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n"); return -EFAULT; } if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, (char)ICP_QAT_LOC_MEM1_MODE (uof_image->ae_mode))) { - pr_err("QAT: qat_hal_set_ae_lm_mode error\n "); + pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n"); return -EFAULT; } } @@ -961,14 +961,14 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) obj_handle->prod_rev = PID_MAJOR_REV | (PID_MINOR_REV & handle->hal_handle->revision_id); if (qat_uclo_check_uof_compat(obj_handle)) { - pr_err("QAT: uof incompatible\n"); + pr_err("QAT: UOF incompatible\n"); return -EINVAL; } obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE; if (!obj_handle->obj_hdr->file_buff || !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT, &obj_handle->str_table)) { - pr_err("QAT: uof doesn't have effective images\n"); + pr_err("QAT: UOF doesn't have effective images\n"); goto out_err; } obj_handle->uimage_num = -- cgit v1.2.3 From d9a44abf3aede89c3dbb6dfa8a95c856b9ae8da3 Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Fri, 25 Jul 2014 15:55:57 -0700 Subject: crypto: qat - Use hweight for bit counting Use predefined hweight32 function instead of writing a new one. 
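The converted call sites keep only the least significant bit of the population count, i.e. the even/odd parity of each 16-bit half of the data word. A minimal sketch of that use, as a hypothetical helper rather than driver code:

#include <linux/bitops.h>	/* hweight32() */

/* Parity of the low 16 bits of a word: hweight32() counts the set
 * bits, and the low bit of that count is the parity. */
static unsigned int parity16(unsigned int word)
{
	return hweight32(word & 0xffff) & 0x1;
}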
Signed-off-by: Pingchao Yang Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/qat_hal.c | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c index 8d686a2edde6..9b8a31521ff3 100644 --- a/drivers/crypto/qat/qat_common/qat_hal.c +++ b/drivers/crypto/qat/qat_common/qat_hal.c @@ -795,17 +795,6 @@ static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle, qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr); } -static int qat_hal_count_bits(unsigned int word) -{ - int n = 0; - - while (word) { - n++; - word &= word - 1; - } - return n; -} - void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int uaddr, unsigned int words_num, unsigned int *data) @@ -822,9 +811,9 @@ void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, ((data[i] & 0xff00) << 2) | (0x3 << 8) | (data[i] & 0xff); uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28); - uwrd_hi |= (qat_hal_count_bits(data[i] & 0xffff) & 0x1) << 8; + uwrd_hi |= (hweight32(data[i] & 0xffff) & 0x1) << 8; tmp = ((data[i] >> 0x10) & 0xffff); - uwrd_hi |= (qat_hal_count_bits(tmp) & 0x1) << 9; + uwrd_hi |= (hweight32(tmp) & 0x1) << 9; qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo); qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi); } -- cgit v1.2.3 From 4f74c3989b14338544b65360ca4f8587c63d7fd9 Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Fri, 25 Jul 2014 15:56:03 -0700 Subject: crypto: qat - Fixed SKU1 dev issue Fix an issue with the SKU1 device. A SKU1 device has 8 micro engines, as opposed to 12 in other SKUs, so it was not possible to start the non-existent micro engines.
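Put differently, loop bounds must come from the runtime engine count rather than the compile-time maximum. A rough sketch of the corrected bound, with names and error handling as in the diff below:

/* Bound batch init by the engines this SKU actually exposes
 * (8 on SKU1) instead of ICP_QAT_UCLO_MAX_AE (12). */
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
	if (qat_hal_batch_wr_lm(handle, ae, obj_handle->lm_init_tab[ae])) {
		pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
		return -EINVAL;
	}
}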
Signed-off-by: Bo Cui Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/qat_uclo.c | 6 +----- drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h | 2 +- 2 files changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index 258009178031..1e27f9f7fddf 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -256,10 +256,6 @@ static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle, pr_err("QAT: Parse num for AE number failed\n"); return -EINVAL; } - if (!test_bit(*ae, (unsigned long *)&handle->hal_handle->ae_mask)) { - pr_err("QAT: ae %d to be init is fused off\n", *ae); - return -EINVAL; - } if (*ae >= ICP_QAT_UCLO_MAX_AE) { pr_err("QAT: ae %d out of range\n", *ae); return -EINVAL; @@ -456,7 +452,7 @@ static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle) (sizeof(struct icp_qat_uof_memvar_attr) * initmem->val_attr_num)); } - for (ae = 0; ae < ICP_QAT_UCLO_MAX_AE; ae++) { + for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { if (qat_hal_batch_wr_lm(handle, ae, obj_handle->lm_init_tab[ae])) { pr_err("QAT: fail to batch init lmem for AE %d\n", ae); diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h index c5ce236aa979..b707f292b377 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h @@ -59,7 +59,7 @@ #define ADF_DH895XCC_FUSECTL_SKU_4 0x3 #define ADF_DH895XCC_MAX_ACCELERATORS 6 #define ADF_DH895XCC_MAX_ACCELENGINES 12 -#define ADF_DH895XCC_ACCELERATORS_REG_OFFSET 18 +#define ADF_DH895XCC_ACCELERATORS_REG_OFFSET 13 #define ADF_DH895XCC_ACCELERATORS_MASK 0x3F #define ADF_DH895XCC_ACCELENGINES_MASK 0xFFF #define ADF_DH895XCC_LEGFUSE_OFFSET 0x4C -- cgit v1.2.3 From 593901aa046b175e2948d361e439e13e58036146 Mon Sep 17 00:00:00 2001 From: Pramod Gurav Date: Mon, 28 Jul 2014 17:45:56 +0530 Subject: crypto: atmel-sha - Switch to managed version of kzalloc This patch switches data allocation from kzalloc to devm_kzalloc. It also removes some kfree() calls on data that is now allocated using devm_kzalloc(). CC: Herbert Xu CC: "David S.
Miller" CC: Grant Likely CC: Rob Herring CC: Nicolas Ferre Signed-off-by: Pramod Gurav Signed-off-by: Herbert Xu --- drivers/crypto/atmel-sha.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index 0618be06b9fb..9a4f69eaa5e0 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -1353,7 +1353,6 @@ static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pd GFP_KERNEL); if (!pdata->dma_slave) { dev_err(&pdev->dev, "could not allocate memory for dma_slave\n"); - devm_kfree(&pdev->dev, pdata); return ERR_PTR(-ENOMEM); } @@ -1375,7 +1374,8 @@ static int atmel_sha_probe(struct platform_device *pdev) unsigned long sha_phys_size; int err; - sha_dd = kzalloc(sizeof(struct atmel_sha_dev), GFP_KERNEL); + sha_dd = devm_kzalloc(&pdev->dev, sizeof(struct atmel_sha_dev), + GFP_KERNEL); if (sha_dd == NULL) { dev_err(dev, "unable to alloc data struct.\n"); err = -ENOMEM; @@ -1490,8 +1490,6 @@ clk_err: free_irq(sha_dd->irq, sha_dd); res_err: tasklet_kill(&sha_dd->done_task); - kfree(sha_dd); - sha_dd = NULL; sha_dd_err: dev_err(dev, "initialization failed.\n"); @@ -1523,9 +1521,6 @@ static int atmel_sha_remove(struct platform_device *pdev) if (sha_dd->irq >= 0) free_irq(sha_dd->irq, sha_dd); - kfree(sha_dd); - sha_dd = NULL; - return 0; } -- cgit v1.2.3 From c659d07f11a359aa053e35d3ee549289c86fab64 Mon Sep 17 00:00:00 2001 From: Pramod Gurav Date: Mon, 28 Jul 2014 17:46:33 +0530 Subject: crypto: atmel-tdes - Switch to managed version of kzalloc This patch switches data allocation from kzalloc to devm_kzalloc. It also removes some kfree() on data that was earlier allocated using devm_kzalloc() from probe as well as remove funtions. CC: Herbert Xu CC: "David S. Miller" CC: Grant Likely CC: Rob Herring Signed-off-by: Pramod Gurav Signed-off-by: Herbert Xu --- drivers/crypto/atmel-tdes.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c index 6cde5b530c69..d3a9041938ea 100644 --- a/drivers/crypto/atmel-tdes.c +++ b/drivers/crypto/atmel-tdes.c @@ -1337,7 +1337,6 @@ static struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *p GFP_KERNEL); if (!pdata->dma_slave) { dev_err(&pdev->dev, "could not allocate memory for dma_slave\n"); - devm_kfree(&pdev->dev, pdata); return ERR_PTR(-ENOMEM); } @@ -1359,7 +1358,7 @@ static int atmel_tdes_probe(struct platform_device *pdev) unsigned long tdes_phys_size; int err; - tdes_dd = kzalloc(sizeof(struct atmel_tdes_dev), GFP_KERNEL); + tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL); if (tdes_dd == NULL) { dev_err(dev, "unable to alloc data struct.\n"); err = -ENOMEM; @@ -1483,8 +1482,6 @@ tdes_irq_err: res_err: tasklet_kill(&tdes_dd->done_task); tasklet_kill(&tdes_dd->queue_task); - kfree(tdes_dd); - tdes_dd = NULL; tdes_dd_err: dev_err(dev, "initialization failed.\n"); @@ -1519,9 +1516,6 @@ static int atmel_tdes_remove(struct platform_device *pdev) if (tdes_dd->irq >= 0) free_irq(tdes_dd->irq, tdes_dd); - kfree(tdes_dd); - tdes_dd = NULL; - return 0; } -- cgit v1.2.3 From 6391723293bb55f05b43973bbbc7a06822b50555 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Wed, 30 Jul 2014 15:41:32 -0500 Subject: crypto: ccp - Do not sign extend input data to CCP The CCP hardware interprets all numbers as unsigned numbers, therefore sign extending input data is not valid. 
Modify the RSA and ECC function calls so that they do not perform sign extension. This patch is based on the cryptodev-2.6 kernel tree. Signed-off-by: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-ops.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 9ae006d69df4..8729364261d7 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -1606,7 +1606,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) goto e_ksb; ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len, CCP_KSB_BYTES, - true); + false); ret = ccp_copy_to_ksb(cmd_q, &exp, op.jobid, op.ksb_key, CCP_PASSTHRU_BYTESWAP_NOOP); if (ret) { @@ -1623,10 +1623,10 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) goto e_exp; ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len, CCP_KSB_BYTES, - true); + false); src.address += o_len; /* Adjust the address for the copy operation */ ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len, CCP_KSB_BYTES, - true); + false); src.address -= o_len; /* Reset the address to original value */ /* Prepare the output area for the operation */ @@ -1841,20 +1841,20 @@ static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) /* Copy the ECC modulus */ ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len, - CCP_ECC_OPERAND_SIZE, true); + CCP_ECC_OPERAND_SIZE, false); src.address += CCP_ECC_OPERAND_SIZE; /* Copy the first operand */ ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1, ecc->u.mm.operand_1_len, - CCP_ECC_OPERAND_SIZE, true); + CCP_ECC_OPERAND_SIZE, false); src.address += CCP_ECC_OPERAND_SIZE; if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) { /* Copy the second operand */ ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2, ecc->u.mm.operand_2_len, - CCP_ECC_OPERAND_SIZE, true); + CCP_ECC_OPERAND_SIZE, false); src.address += CCP_ECC_OPERAND_SIZE; } @@ -1960,17 +1960,17 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) /* Copy the ECC modulus */ ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len, - CCP_ECC_OPERAND_SIZE, true); + CCP_ECC_OPERAND_SIZE, false); src.address += CCP_ECC_OPERAND_SIZE; /* Copy the first point X and Y coordinate */ ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x, ecc->u.pm.point_1.x_len, - CCP_ECC_OPERAND_SIZE, true); + CCP_ECC_OPERAND_SIZE, false); src.address += CCP_ECC_OPERAND_SIZE; ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y, ecc->u.pm.point_1.y_len, - CCP_ECC_OPERAND_SIZE, true); + CCP_ECC_OPERAND_SIZE, false); src.address += CCP_ECC_OPERAND_SIZE; /* Set the first point Z coordianate to 1 */ @@ -1981,11 +1981,11 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) /* Copy the second point X and Y coordinate */ ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x, ecc->u.pm.point_2.x_len, - CCP_ECC_OPERAND_SIZE, true); + CCP_ECC_OPERAND_SIZE, false); src.address += CCP_ECC_OPERAND_SIZE; ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y, ecc->u.pm.point_2.y_len, - CCP_ECC_OPERAND_SIZE, true); + CCP_ECC_OPERAND_SIZE, false); src.address += CCP_ECC_OPERAND_SIZE; /* Set the second point Z coordianate to 1 */ @@ -1995,14 +1995,14 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) /* Copy the Domain "a" parameter */ ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a, ecc->u.pm.domain_a_len, - CCP_ECC_OPERAND_SIZE, true); +
CCP_ECC_OPERAND_SIZE, false); src.address += CCP_ECC_OPERAND_SIZE; if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) { /* Copy the scalar value */ ccp_reverse_set_dm_area(&src, ecc->u.pm.scalar, ecc->u.pm.scalar_len, - CCP_ECC_OPERAND_SIZE, true); + CCP_ECC_OPERAND_SIZE, false); src.address += CCP_ECC_OPERAND_SIZE; } } -- cgit v1.2.3
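To illustrate the change above: the final boolean argument of ccp_reverse_set_dm_area() controls whether the operand is sign extended when it is padded out to the full operand size. A minimal sketch of that padding decision; the helper below is hypothetical (the real function also reverses the byte order for the hardware):

#include <linux/string.h>	/* memset() */
#include <linux/types.h>	/* u8, bool */

/* Hypothetical padding step: 'buf' holds 'len' significant bytes of
 * a 'size'-byte operand. With sign_extend, a value whose top bit is
 * set would be padded with 0xff bytes and then read back by the
 * unsigned CCP as a huge number, hence the switch to false. */
static void pad_operand(u8 *buf, unsigned int len, unsigned int size,
			bool sign_extend)
{
	u8 pad = 0x00;

	if (sign_extend && len && (buf[len - 1] & 0x80))
		pad = 0xff;
	memset(buf + len, pad, size - len);
}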