author     Linus Torvalds <torvalds@linux-foundation.org>  2014-08-04 20:52:51 +0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-08-04 20:52:51 +0400
commit     3e7a716a92a0e051f5502c7b689f8c9127c37c33 (patch)
tree       2ebb892eb3a024f108e68a9577c767a53b955a4a /drivers
parent     c2df436bd2504f52808c10ab7d7da832f61ad3f0 (diff)
parent     ce5481d01f67ad304908ec2113515440c0fa86eb (diff)
download   linux-3e7a716a92a0e051f5502c7b689f8c9127c37c33.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
 - CTR(AES) optimisation on x86_64 using "by8" AVX.
 - arm64 support to ccp
 - Intel QAT crypto driver
 - Qualcomm crypto engine driver
 - x86-64 assembly optimisation for 3DES
 - CTR(3DES) speed test
 - move FIPS panic from module.c so that it only triggers on crypto modules
 - SP800-90A Deterministic Random Bit Generator (drbg).
 - more test vectors for ghash.
 - tweak self tests to catch partial block bugs.
 - misc fixes.

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (94 commits)
  crypto: drbg - fix failure of generating multiple of 2**16 bytes
  crypto: ccp - Do not sign extend input data to CCP
  crypto: testmgr - add missing spaces to drbg error strings
  crypto: atmel-tdes - Switch to managed version of kzalloc
  crypto: atmel-sha - Switch to managed version of kzalloc
  crypto: testmgr - use chunks smaller than algo block size in chunk tests
  crypto: qat - Fixed SKU1 dev issue
  crypto: qat - Use hweight for bit counting
  crypto: qat - Updated print outputs
  crypto: qat - change ae_num to ae_id
  crypto: qat - change slice->regions to slice->region
  crypto: qat - use min_t macro
  crypto: qat - remove unnecessary parentheses
  crypto: qat - remove unneeded header
  crypto: qat - checkpatch blank lines
  crypto: qat - remove unnecessary return codes
  crypto: Resolve shadow warnings
  crypto: ccp - Remove "select OF" from Kconfig
  crypto: caam - fix DECO RSR polling
  crypto: qce - Let 'DEV_QCE' depend on both HAS_DMA and HAS_IOMEM
  ...
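For context, the new SP800-90A DRBG mentioned above is exposed through the generic crypto_rng interface, so a consumer requests it by algorithm name like any other kernel RNG. A minimal sketch of pulling random bytes from it follows; the exact variant name used is an assumption, since the drbg registers a family of hash-, HMAC- and CTR-based instances.

#include <linux/err.h>
#include <linux/types.h>
#include <crypto/rng.h>

static int drbg_example(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	int ret;

	/* "drbg_nopr_hmac_sha256" is assumed to be one of the registered
	 * variant names; pick whichever variant the system requires. */
	rng = crypto_alloc_rng("drbg_nopr_hmac_sha256", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = crypto_rng_get_bytes(rng, buf, len);
	crypto_free_rng(rng);
	return ret;
}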
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/crypto/Kconfig | 20
-rw-r--r--  drivers/crypto/Makefile | 2
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c | 2
-rw-r--r--  drivers/crypto/atmel-sha.c | 9
-rw-r--r--  drivers/crypto/atmel-tdes.c | 8
-rw-r--r--  drivers/crypto/caam/caamalg.c | 80
-rw-r--r--  drivers/crypto/caam/caamhash.c | 186
-rw-r--r--  drivers/crypto/caam/caamrng.c | 79
-rw-r--r--  drivers/crypto/caam/ctrl.c | 76
-rw-r--r--  drivers/crypto/caam/desc.h | 1
-rw-r--r--  drivers/crypto/caam/intern.h | 1
-rw-r--r--  drivers/crypto/caam/jr.c | 6
-rw-r--r--  drivers/crypto/caam/regs.h | 105
-rw-r--r--  drivers/crypto/ccp/Makefile | 5
-rw-r--r--  drivers/crypto/ccp/ccp-dev.c | 34
-rw-r--r--  drivers/crypto/ccp/ccp-dev.h | 14
-rw-r--r--  drivers/crypto/ccp/ccp-ops.c | 26
-rw-r--r--  drivers/crypto/ccp/ccp-pci.c | 39
-rw-r--r--  drivers/crypto/ccp/ccp-platform.c | 230
-rw-r--r--  drivers/crypto/nx/nx-842.c | 2
-rw-r--r--  drivers/crypto/qat/Kconfig | 23
-rw-r--r--  drivers/crypto/qat/Makefile | 2
-rw-r--r--  drivers/crypto/qat/qat_common/Makefile | 14
-rw-r--r--  drivers/crypto/qat/qat_common/adf_accel_devices.h | 205
-rw-r--r--  drivers/crypto/qat/qat_common/adf_accel_engine.c | 168
-rw-r--r--  drivers/crypto/qat/qat_common/adf_aer.c | 259
-rw-r--r--  drivers/crypto/qat/qat_common/adf_cfg.c | 361
-rw-r--r--  drivers/crypto/qat/qat_common/adf_cfg.h | 87
-rw-r--r--  drivers/crypto/qat/qat_common/adf_cfg_common.h | 100
-rw-r--r--  drivers/crypto/qat/qat_common/adf_cfg_strings.h | 83
-rw-r--r--  drivers/crypto/qat/qat_common/adf_cfg_user.h | 94
-rw-r--r--  drivers/crypto/qat/qat_common/adf_common_drv.h | 192
-rw-r--r--  drivers/crypto/qat/qat_common/adf_ctl_drv.c | 490
-rw-r--r--  drivers/crypto/qat/qat_common/adf_dev_mgr.c | 215
-rw-r--r--  drivers/crypto/qat/qat_common/adf_init.c | 388
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport.c | 567
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport.h | 63
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport_access_macros.h | 160
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport_debug.c | 304
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport_internal.h | 118
-rw-r--r--  drivers/crypto/qat/qat_common/icp_qat_fw.h | 316
-rw-r--r--  drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h | 131
-rw-r--r--  drivers/crypto/qat/qat_common/icp_qat_fw_la.h | 404
-rw-r--r--  drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h | 78
-rw-r--r--  drivers/crypto/qat/qat_common/icp_qat_hal.h | 125
-rw-r--r--  drivers/crypto/qat/qat_common/icp_qat_hw.h | 305
-rw-r--r--  drivers/crypto/qat/qat_common/icp_qat_uclo.h | 377
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c | 1038
-rw-r--r--  drivers/crypto/qat/qat_common/qat_crypto.c | 284
-rw-r--r--  drivers/crypto/qat/qat_common/qat_crypto.h | 83
-rw-r--r--  drivers/crypto/qat/qat_common/qat_hal.c | 1393
-rw-r--r--  drivers/crypto/qat/qat_common/qat_uclo.c | 1181
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/Makefile | 8
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_admin.c | 144
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 214
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h | 86
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_drv.c | 449
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_drv.h | 67
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c | 159
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_isr.c | 266
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/qat_admin.c | 107
-rw-r--r--  drivers/crypto/qce/Makefile | 6
-rw-r--r--  drivers/crypto/qce/ablkcipher.c | 431
-rw-r--r--  drivers/crypto/qce/cipher.h | 68
-rw-r--r--  drivers/crypto/qce/common.c | 438
-rw-r--r--  drivers/crypto/qce/common.h | 102
-rw-r--r--  drivers/crypto/qce/core.c | 286
-rw-r--r--  drivers/crypto/qce/core.h | 68
-rw-r--r--  drivers/crypto/qce/dma.c | 186
-rw-r--r--  drivers/crypto/qce/dma.h | 58
-rw-r--r--  drivers/crypto/qce/regs-v5.h | 334
-rw-r--r--  drivers/crypto/qce/sha.c | 588
-rw-r--r--  drivers/crypto/qce/sha.h | 81
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_core.c | 25
74 files changed, 14524 insertions, 180 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 02f177aeb16c..2fb0fdfc87df 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -391,7 +391,7 @@ config CRYPTO_DEV_ATMEL_SHA
config CRYPTO_DEV_CCP
bool "Support for AMD Cryptographic Coprocessor"
- depends on X86 && PCI
+ depends on (X86 && PCI) || ARM64
default n
help
The AMD Cryptographic Coprocessor provides hardware support
@@ -418,4 +418,22 @@ config CRYPTO_DEV_MXS_DCP
To compile this driver as a module, choose M here: the module
will be called mxs-dcp.
+source "drivers/crypto/qat/Kconfig"
+
+config CRYPTO_DEV_QCE
+ tristate "Qualcomm crypto engine accelerator"
+ depends on (ARCH_QCOM || COMPILE_TEST) && HAS_DMA && HAS_IOMEM
+ select CRYPTO_AES
+ select CRYPTO_DES
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ select CRYPTO_XTS
+ select CRYPTO_CTR
+ select CRYPTO_ALGAPI
+ select CRYPTO_BLKCIPHER
+ help
+ This driver supports Qualcomm crypto engine accelerator
+ hardware. To compile this driver as a module, choose M here. The
+ module will be called qcrypto.
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 482f090d16d0..3924f93d5774 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -23,3 +23,5 @@ obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
+obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
+obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 37f9cc98ba17..e4c6c58fbb03 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1292,7 +1292,7 @@ static struct platform_driver crypto4xx_driver = {
.of_match_table = crypto4xx_match,
},
.probe = crypto4xx_probe,
- .remove = crypto4xx_remove,
+ .remove = __exit_p(crypto4xx_remove),
};
module_platform_driver(crypto4xx_driver);
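The crypto4xx change (and the matching nx-842 one at the end of this diff) wraps the remove callback in __exit_p(), so the pointer collapses to NULL when the driver is built in and its __exit-marked remove function is discarded. A simplified sketch of what the existing <linux/init.h> macro boils down to:

/*
 * Simplified view of __exit_p(): keep the function pointer for modular
 * builds, turn it into NULL when the __exit section is dropped in
 * built-in builds, so .remove never points at discarded code.
 */
#ifdef MODULE
#define __exit_p(x)	x
#else
#define __exit_p(x)	NULL
#endif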
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 0618be06b9fb..9a4f69eaa5e0 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -1353,7 +1353,6 @@ static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pd
GFP_KERNEL);
if (!pdata->dma_slave) {
dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
- devm_kfree(&pdev->dev, pdata);
return ERR_PTR(-ENOMEM);
}
@@ -1375,7 +1374,8 @@ static int atmel_sha_probe(struct platform_device *pdev)
unsigned long sha_phys_size;
int err;
- sha_dd = kzalloc(sizeof(struct atmel_sha_dev), GFP_KERNEL);
+ sha_dd = devm_kzalloc(&pdev->dev, sizeof(struct atmel_sha_dev),
+ GFP_KERNEL);
if (sha_dd == NULL) {
dev_err(dev, "unable to alloc data struct.\n");
err = -ENOMEM;
@@ -1490,8 +1490,6 @@ clk_err:
free_irq(sha_dd->irq, sha_dd);
res_err:
tasklet_kill(&sha_dd->done_task);
- kfree(sha_dd);
- sha_dd = NULL;
sha_dd_err:
dev_err(dev, "initialization failed.\n");
@@ -1523,9 +1521,6 @@ static int atmel_sha_remove(struct platform_device *pdev)
if (sha_dd->irq >= 0)
free_irq(sha_dd->irq, sha_dd);
- kfree(sha_dd);
- sha_dd = NULL;
-
return 0;
}
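Both atmel patches follow the standard managed-allocation idiom: memory obtained with devm_kzalloc() is tied to the device lifetime, which is why the explicit kfree() calls disappear from the error and remove paths. A minimal illustration of the idiom, with a hypothetical structure and probe name rather than the driver's real ones:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_dev {
	void __iomem *regs;
};

static int example_probe(struct platform_device *pdev)
{
	struct example_dev *dd;

	/* Freed automatically when probe fails or the device is unbound,
	 * so no kfree() is needed in error paths or in remove(). */
	dd = devm_kzalloc(&pdev->dev, sizeof(*dd), GFP_KERNEL);
	if (!dd)
		return -ENOMEM;

	platform_set_drvdata(pdev, dd);
	return 0;
}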
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 6cde5b530c69..d3a9041938ea 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1337,7 +1337,6 @@ static struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *p
GFP_KERNEL);
if (!pdata->dma_slave) {
dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
- devm_kfree(&pdev->dev, pdata);
return ERR_PTR(-ENOMEM);
}
@@ -1359,7 +1358,7 @@ static int atmel_tdes_probe(struct platform_device *pdev)
unsigned long tdes_phys_size;
int err;
- tdes_dd = kzalloc(sizeof(struct atmel_tdes_dev), GFP_KERNEL);
+ tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
if (tdes_dd == NULL) {
dev_err(dev, "unable to alloc data struct.\n");
err = -ENOMEM;
@@ -1483,8 +1482,6 @@ tdes_irq_err:
res_err:
tasklet_kill(&tdes_dd->done_task);
tasklet_kill(&tdes_dd->queue_task);
- kfree(tdes_dd);
- tdes_dd = NULL;
tdes_dd_err:
dev_err(dev, "initialization failed.\n");
@@ -1519,9 +1516,6 @@ static int atmel_tdes_remove(struct platform_device *pdev)
if (tdes_dd->irq >= 0)
free_irq(tdes_dd->irq, tdes_dd);
- kfree(tdes_dd);
- tdes_dd = NULL;
-
return 0;
}
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index c09ce1f040d3..a80ea853701d 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -97,6 +97,13 @@ static inline void append_dec_op1(u32 *desc, u32 type)
{
u32 *jump_cmd, *uncond_jump_cmd;
+ /* DK bit is valid only for AES */
+ if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ return;
+ }
+
jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
append_operation(desc, type | OP_ALG_AS_INITFINAL |
OP_ALG_DECRYPT);
@@ -786,7 +793,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+ if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
@@ -1313,8 +1320,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
DMA_FROM_DEVICE, dst_chained);
}
- /* Check if data are contiguous */
iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, iv_dma)) {
+ dev_err(jrdev, "unable to map IV\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Check if data are contiguous */
if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
iv_dma || src_nents || iv_dma + ivsize !=
sg_dma_address(req->src)) {
@@ -1345,8 +1357,6 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
desc_bytes;
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
*all_contig_ptr = all_contig;
sec4_sg_index = 0;
@@ -1369,6 +1379,12 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
sg_to_sec4_sg_last(req->dst, dst_nents,
edesc->sec4_sg + sec4_sg_index, 0);
}
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return ERR_PTR(-ENOMEM);
+ }
return edesc;
}
@@ -1494,8 +1510,13 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
DMA_FROM_DEVICE, dst_chained);
}
- /* Check if data are contiguous */
iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, iv_dma)) {
+ dev_err(jrdev, "unable to map IV\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Check if data are contiguous */
if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
contig &= ~GIV_SRC_CONTIG;
@@ -1534,8 +1555,6 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
desc_bytes;
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
*contig_ptr = contig;
sec4_sg_index = 0;
@@ -1559,6 +1578,12 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
sg_to_sec4_sg_last(req->dst, dst_nents,
edesc->sec4_sg + sec4_sg_index, 0);
}
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return ERR_PTR(-ENOMEM);
+ }
return edesc;
}
@@ -1650,11 +1675,16 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
DMA_FROM_DEVICE, dst_chained);
}
+ iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, iv_dma)) {
+ dev_err(jrdev, "unable to map IV\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
/*
* Check if iv can be contiguous with source and destination.
* If so, include it. If not, create scatterlist.
*/
- iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
iv_contig = true;
else
@@ -1693,6 +1723,11 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
edesc->iv_dma = iv_dma;
#ifdef DEBUG
@@ -2441,8 +2476,37 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
static int __init caam_algapi_init(void)
{
+ struct device_node *dev_node;
+ struct platform_device *pdev;
+ struct device *ctrldev;
+ void *priv;
int i = 0, err = 0;
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ if (!dev_node) {
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+ if (!dev_node)
+ return -ENODEV;
+ }
+
+ pdev = of_find_device_by_node(dev_node);
+ if (!pdev) {
+ of_node_put(dev_node);
+ return -ENODEV;
+ }
+
+ ctrldev = &pdev->dev;
+ priv = dev_get_drvdata(ctrldev);
+ of_node_put(dev_node);
+
+ /*
+ * If priv is NULL, it's probably because the caam driver wasn't
+ * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+ */
+ if (!priv)
+ return -ENODEV;
+
+
INIT_LIST_HEAD(&alg_list);
/* register crypto algorithms the device supports */
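The recurring rule in the caamalg (and later caamhash) hunks is that no dma_map_single() result is used before being checked with dma_mapping_error(). An illustrative helper, not taken from the driver, that condenses the pattern being added:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Illustrative form of the checks added above: map the buffer, verify
 * the handle, and bail out before it is ever written into a descriptor. */
static int example_map_iv(struct device *jrdev, void *iv, size_t ivsize,
			  dma_addr_t *iv_dma)
{
	*iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, *iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return -ENOMEM;
	}

	return 0;
}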
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 0d9284ef96a8..b464d03ebf40 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -137,13 +137,20 @@ struct caam_hash_state {
/* Common job descriptor seq in/out ptr routines */
/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
-static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
- struct caam_hash_state *state,
- int ctx_len)
+static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
+ struct caam_hash_state *state,
+ int ctx_len)
{
state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
ctx_len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(jrdev, state->ctx_dma)) {
+ dev_err(jrdev, "unable to map ctx\n");
+ return -ENOMEM;
+ }
+
append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
+
+ return 0;
}
/* Map req->result, and append seq_out_ptr command that points to it */
@@ -201,14 +208,19 @@ try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
}
/* Map state->caam_ctx, and add it to link table */
-static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
- struct caam_hash_state *state,
- int ctx_len,
- struct sec4_sg_entry *sec4_sg,
- u32 flag)
+static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
+ struct caam_hash_state *state, int ctx_len,
+ struct sec4_sg_entry *sec4_sg, u32 flag)
{
state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
+ if (dma_mapping_error(jrdev, state->ctx_dma)) {
+ dev_err(jrdev, "unable to map ctx\n");
+ return -ENOMEM;
+ }
+
dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
+
+ return 0;
}
/* Common shared descriptor commands */
@@ -487,11 +499,11 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
digestsize, 1);
#endif
}
- *keylen = digestsize;
-
dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
+ *keylen = digestsize;
+
kfree(desc);
return ret;
@@ -706,7 +718,7 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
if (err)
caam_jr_strstatus(jrdev, err);
- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
kfree(edesc);
#ifdef DEBUG
@@ -741,7 +753,7 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
if (err)
caam_jr_strstatus(jrdev, err);
- ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
kfree(edesc);
#ifdef DEBUG
@@ -808,12 +820,11 @@ static int ahash_update_ctx(struct ahash_request *req)
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes,
- DMA_TO_DEVICE);
- ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
- edesc->sec4_sg, DMA_BIDIRECTIONAL);
+ ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+ edesc->sec4_sg, DMA_BIDIRECTIONAL);
+ if (ret)
+ return ret;
state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
edesc->sec4_sg + 1,
@@ -839,6 +850,14 @@ static int ahash_update_ctx(struct ahash_request *req)
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
HDR_REVERSE);
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return -ENOMEM;
+ }
+
append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
to_hash, LDST_SGF);
@@ -911,23 +930,34 @@ static int ahash_final_ctx(struct ahash_request *req)
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
edesc->src_nents = 0;
- ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
- DMA_TO_DEVICE);
+ ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+ edesc->sec4_sg, DMA_TO_DEVICE);
+ if (ret)
+ return ret;
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
buf, state->buf_dma, buflen,
last_buflen);
(edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return -ENOMEM;
+ }
+
append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
LDST_SGF);
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
+ if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+ dev_err(jrdev, "unable to map dst\n");
+ return -ENOMEM;
+ }
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -989,11 +1019,11 @@ static int ahash_finup_ctx(struct ahash_request *req)
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
- ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
- DMA_TO_DEVICE);
+ ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+ edesc->sec4_sg, DMA_TO_DEVICE);
+ if (ret)
+ return ret;
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
buf, state->buf_dma, buflen,
@@ -1002,11 +1032,22 @@ static int ahash_finup_ctx(struct ahash_request *req)
src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
sec4_sg_src_index, chained);
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return -ENOMEM;
+ }
+
append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
buflen + req->nbytes, LDST_SGF);
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
+ if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+ dev_err(jrdev, "unable to map dst\n");
+ return -ENOMEM;
+ }
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1056,8 +1097,7 @@ static int ahash_digest(struct ahash_request *req)
}
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->src_nents = src_nents;
edesc->chained = chained;
@@ -1067,6 +1107,12 @@ static int ahash_digest(struct ahash_request *req)
if (src_nents) {
sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return -ENOMEM;
+ }
src_dma = edesc->sec4_sg_dma;
options = LDST_SGF;
} else {
@@ -1077,6 +1123,10 @@ static int ahash_digest(struct ahash_request *req)
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
+ if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+ dev_err(jrdev, "unable to map dst\n");
+ return -ENOMEM;
+ }
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1125,11 +1175,19 @@ static int ahash_final_no_ctx(struct ahash_request *req)
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, state->buf_dma)) {
+ dev_err(jrdev, "unable to map src\n");
+ return -ENOMEM;
+ }
append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
+ if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+ dev_err(jrdev, "unable to map dst\n");
+ return -ENOMEM;
+ }
edesc->src_nents = 0;
#ifdef DEBUG
@@ -1197,9 +1255,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes,
- DMA_TO_DEVICE);
+ edesc->dst_dma = 0;
state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
buf, *buflen);
@@ -1216,9 +1272,19 @@ static int ahash_update_no_ctx(struct ahash_request *req)
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
HDR_REVERSE);
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return -ENOMEM;
+ }
+
append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
- map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+ ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+ if (ret)
+ return ret;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1297,8 +1363,6 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
state->buf_dma, buflen,
@@ -1307,11 +1371,22 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
chained);
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return -ENOMEM;
+ }
+
append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
req->nbytes, LDST_SGF);
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
+ if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+ dev_err(jrdev, "unable to map dst\n");
+ return -ENOMEM;
+ }
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1380,13 +1455,19 @@ static int ahash_update_first(struct ahash_request *req)
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes,
- DMA_TO_DEVICE);
+ edesc->dst_dma = 0;
if (src_nents) {
sg_to_sec4_sg_last(req->src, src_nents,
edesc->sec4_sg, 0);
+ edesc->sec4_sg_dma = dma_map_single(jrdev,
+ edesc->sec4_sg,
+ sec4_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return -ENOMEM;
+ }
src_dma = edesc->sec4_sg_dma;
options = LDST_SGF;
} else {
@@ -1404,7 +1485,9 @@ static int ahash_update_first(struct ahash_request *req)
append_seq_in_ptr(desc, src_dma, to_hash, options);
- map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+ ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+ if (ret)
+ return ret;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1453,6 +1536,7 @@ static int ahash_init(struct ahash_request *req)
state->final = ahash_final_no_ctx;
state->current_buf = 0;
+ state->buf_dma = 0;
return 0;
}
@@ -1787,8 +1871,36 @@ caam_hash_alloc(struct caam_hash_template *template,
static int __init caam_algapi_hash_init(void)
{
+ struct device_node *dev_node;
+ struct platform_device *pdev;
+ struct device *ctrldev;
+ void *priv;
int i = 0, err = 0;
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ if (!dev_node) {
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+ if (!dev_node)
+ return -ENODEV;
+ }
+
+ pdev = of_find_device_by_node(dev_node);
+ if (!pdev) {
+ of_node_put(dev_node);
+ return -ENODEV;
+ }
+
+ ctrldev = &pdev->dev;
+ priv = dev_get_drvdata(ctrldev);
+ of_node_put(dev_node);
+
+ /*
+ * If priv is NULL, it's probably because the caam driver wasn't
+ * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+ */
+ if (!priv)
+ return -ENODEV;
+
INIT_LIST_HEAD(&hash_list);
/* register crypto algorithms the device supports */
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 8c07d3153f12..ae31e555793c 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -185,7 +185,7 @@ static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
max - copied_idx, false);
}
-static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx)
+static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
{
struct device *jrdev = ctx->jrdev;
u32 *desc = ctx->sh_desc;
@@ -203,13 +203,18 @@ static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx)
ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
#ifdef DEBUG
print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
desc, desc_bytes(desc), 1);
#endif
+ return 0;
}
-static inline void rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
+static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
{
struct device *jrdev = ctx->jrdev;
struct buf_data *bd = &ctx->bufs[buf_id];
@@ -220,12 +225,17 @@ static inline void rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
HDR_REVERSE);
bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(jrdev, bd->addr)) {
+ dev_err(jrdev, "unable to map dst\n");
+ return -ENOMEM;
+ }
append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
desc, desc_bytes(desc), 1);
#endif
+ return 0;
}
static void caam_cleanup(struct hwrng *rng)
@@ -242,24 +252,44 @@ static void caam_cleanup(struct hwrng *rng)
rng_unmap_ctx(rng_ctx);
}
-static void caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
+static int caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
{
struct buf_data *bd = &ctx->bufs[buf_id];
+ int err;
+
+ err = rng_create_job_desc(ctx, buf_id);
+ if (err)
+ return err;
- rng_create_job_desc(ctx, buf_id);
atomic_set(&bd->empty, BUF_EMPTY);
submit_job(ctx, buf_id == ctx->current_buf);
wait_for_completion(&bd->filled);
+
+ return 0;
}
-static void caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
+static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
{
+ int err;
+
ctx->jrdev = jrdev;
- rng_create_sh_desc(ctx);
+
+ err = rng_create_sh_desc(ctx);
+ if (err)
+ return err;
+
ctx->current_buf = 0;
ctx->cur_buf_idx = 0;
- caam_init_buf(ctx, 0);
- caam_init_buf(ctx, 1);
+
+ err = caam_init_buf(ctx, 0);
+ if (err)
+ return err;
+
+ err = caam_init_buf(ctx, 1);
+ if (err)
+ return err;
+
+ return 0;
}
static struct hwrng caam_rng = {
@@ -278,6 +308,35 @@ static void __exit caam_rng_exit(void)
static int __init caam_rng_init(void)
{
struct device *dev;
+ struct device_node *dev_node;
+ struct platform_device *pdev;
+ struct device *ctrldev;
+ void *priv;
+ int err;
+
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ if (!dev_node) {
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+ if (!dev_node)
+ return -ENODEV;
+ }
+
+ pdev = of_find_device_by_node(dev_node);
+ if (!pdev) {
+ of_node_put(dev_node);
+ return -ENODEV;
+ }
+
+ ctrldev = &pdev->dev;
+ priv = dev_get_drvdata(ctrldev);
+ of_node_put(dev_node);
+
+ /*
+ * If priv is NULL, it's probably because the caam driver wasn't
+ * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+ */
+ if (!priv)
+ return -ENODEV;
dev = caam_jr_alloc();
if (IS_ERR(dev)) {
@@ -287,7 +346,9 @@ static int __init caam_rng_init(void)
rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_DMA);
if (!rng_ctx)
return -ENOMEM;
- caam_init_rng(rng_ctx, dev);
+ err = caam_init_rng(rng_ctx, dev);
+ if (err)
+ return err;
dev_info(dev, "registering rng-caam\n");
return hwrng_register(&caam_rng);
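The caam algapi, hash and rng module inits all gain the same guard: locate the SEC controller node, find its platform device, and refuse to register if the controller driver never populated drvdata. A condensed, illustrative form of that guard (the helper name is hypothetical):

#include <linux/of.h>
#include <linux/of_platform.h>

static int caam_controller_ready(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	void *priv;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	of_node_put(dev_node);
	if (!pdev)
		return -ENODEV;

	/* NULL drvdata means the controller probe never completed
	 * (e.g. RNG4 instantiation failed), so don't register anything. */
	priv = dev_get_drvdata(&pdev->dev);
	return priv ? 0 : -ENODEV;
}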
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 1c38f86bf63a..3cade79ea41e 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -5,6 +5,7 @@
* Copyright 2008-2012 Freescale Semiconductor, Inc.
*/
+#include <linux/device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -87,6 +88,17 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
/* Set the bit to request direct access to DECO0 */
topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+
+ if (ctrlpriv->virt_en == 1) {
+ setbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0);
+
+ while (!(rd_reg32(&topregs->ctrl.deco_rsr) & DECORSR_VALID) &&
+ --timeout)
+ cpu_relax();
+
+ timeout = 100000;
+ }
+
setbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
while (!(rd_reg32(&topregs->ctrl.deco_rq) & DECORR_DEN0) &&
@@ -129,6 +141,9 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
*status = rd_reg32(&topregs->deco.op_status_hi) &
DECO_OP_STATUS_HI_ERR_MASK;
+ if (ctrlpriv->virt_en == 1)
+ clrbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0);
+
/* Mark the DECO as free */
clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
@@ -295,9 +310,6 @@ static int caam_remove(struct platform_device *pdev)
/* Unmap controller region */
iounmap(&topregs->ctrl);
- kfree(ctrlpriv->jrpdev);
- kfree(ctrlpriv);
-
return ret;
}
@@ -380,9 +392,11 @@ static int caam_probe(struct platform_device *pdev)
#ifdef CONFIG_DEBUG_FS
struct caam_perfmon *perfmon;
#endif
- u64 cha_vid;
+ u32 scfgr, comp_params;
+ u32 cha_vid_ls;
- ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
+ ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(struct caam_drv_private),
+ GFP_KERNEL);
if (!ctrlpriv)
return -ENOMEM;
@@ -413,13 +427,40 @@ static int caam_probe(struct platform_device *pdev)
setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
(sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
+ /*
+ * Read the Compile Time paramters and SCFGR to determine
+ * if Virtualization is enabled for this platform
+ */
+ comp_params = rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms);
+ scfgr = rd_reg32(&topregs->ctrl.scfgr);
+
+ ctrlpriv->virt_en = 0;
+ if (comp_params & CTPR_MS_VIRT_EN_INCL) {
+ /* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or
+ * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SCFGR_VIRT_EN = 1
+ */
+ if ((comp_params & CTPR_MS_VIRT_EN_POR) ||
+ (!(comp_params & CTPR_MS_VIRT_EN_POR) &&
+ (scfgr & SCFGR_VIRT_EN)))
+ ctrlpriv->virt_en = 1;
+ } else {
+ /* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */
+ if (comp_params & CTPR_MS_VIRT_EN_POR)
+ ctrlpriv->virt_en = 1;
+ }
+
+ if (ctrlpriv->virt_en == 1)
+ setbits32(&topregs->ctrl.jrstart, JRSTART_JR0_START |
+ JRSTART_JR1_START | JRSTART_JR2_START |
+ JRSTART_JR3_START);
+
if (sizeof(dma_addr_t) == sizeof(u64))
if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
- dma_set_mask(dev, DMA_BIT_MASK(40));
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
else
- dma_set_mask(dev, DMA_BIT_MASK(36));
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
else
- dma_set_mask(dev, DMA_BIT_MASK(32));
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
/*
* Detect and enable JobRs
@@ -432,8 +473,9 @@ static int caam_probe(struct platform_device *pdev)
of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
rspec++;
- ctrlpriv->jrpdev = kzalloc(sizeof(struct platform_device *) * rspec,
- GFP_KERNEL);
+ ctrlpriv->jrpdev = devm_kzalloc(&pdev->dev,
+ sizeof(struct platform_device *) * rspec,
+ GFP_KERNEL);
if (ctrlpriv->jrpdev == NULL) {
iounmap(&topregs->ctrl);
return -ENOMEM;
@@ -456,8 +498,9 @@ static int caam_probe(struct platform_device *pdev)
}
/* Check to see if QI present. If so, enable */
- ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) &
- CTPR_QI_MASK);
+ ctrlpriv->qi_present =
+ !!(rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms) &
+ CTPR_MS_QI_MASK);
if (ctrlpriv->qi_present) {
ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi;
/* This is all that's required to physically enable QI */
@@ -471,13 +514,13 @@ static int caam_probe(struct platform_device *pdev)
return -ENOMEM;
}
- cha_vid = rd_reg64(&topregs->ctrl.perfmon.cha_id);
+ cha_vid_ls = rd_reg32(&topregs->ctrl.perfmon.cha_id_ls);
/*
* If SEC has RNG version >= 4 and RNG state handle has not been
* already instantiated, do RNG instantiation
*/
- if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4) {
+ if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
ctrlpriv->rng4_sh_init =
rd_reg32(&topregs->ctrl.r4tst[0].rdsta);
/*
@@ -531,7 +574,8 @@ static int caam_probe(struct platform_device *pdev)
/* NOTE: RTIC detection ought to go here, around Si time */
- caam_id = rd_reg64(&topregs->ctrl.perfmon.caam_id);
+ caam_id = (u64)rd_reg32(&topregs->ctrl.perfmon.caam_id_ms) << 32 |
+ (u64)rd_reg32(&topregs->ctrl.perfmon.caam_id_ls);
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
@@ -547,7 +591,7 @@ static int caam_probe(struct platform_device *pdev)
*/
perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
- ctrlpriv->dfs_root = debugfs_create_dir("caam", NULL);
+ ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
/* Controller-level - performance monitor counters */
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 7e4500f18df6..d397ff9d56fd 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -321,7 +321,6 @@ struct sec4_sg_entry {
/* Continue - Not the last FIFO store to come */
#define FIFOST_CONT_SHIFT 23
#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT)
-#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT)
/*
* Extended Length - use 32-bit extended length that
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 6d85fcc5bd0a..97363db4e56e 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -82,6 +82,7 @@ struct caam_drv_private {
u8 total_jobrs; /* Total Job Rings in device */
u8 qi_present; /* Nonzero if QI present in device */
int secvio_irq; /* Security violation interrupt number */
+ int virt_en; /* Virtualization enabled in CAAM */
#define RNG4_MAX_HANDLES 2
/* RNG4 block */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index b512a4ba7569..4d18e27ffa9e 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -476,11 +476,11 @@ static int caam_jr_probe(struct platform_device *pdev)
if (sizeof(dma_addr_t) == sizeof(u64))
if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
- dma_set_mask(jrdev, DMA_BIT_MASK(40));
+ dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40));
else
- dma_set_mask(jrdev, DMA_BIT_MASK(36));
+ dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36));
else
- dma_set_mask(jrdev, DMA_BIT_MASK(32));
+ dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
/* Identify the interrupt */
jrpriv->irq = irq_of_parse_and_map(nprop, 0);
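The ctrl.c and jr.c hunks switch from dma_set_mask() to dma_set_mask_and_coherent(), which configures the streaming and coherent masks in a single call. A sketch of the usual probe-time pattern, with the 40-bit first choice mirroring the sec-v5.0 case above and the 32-bit fallback as an illustrative default:

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int example_set_dma_masks(struct device *dev)
{
	int ret;

	/* Try the wide mask first, then fall back to 32 bits if the
	 * platform rejects it. */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
	if (ret)
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	return ret;
}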
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index cbde8b95a6f8..f48e344ffc39 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -84,6 +84,7 @@
#endif
#ifndef CONFIG_64BIT
+#ifdef __BIG_ENDIAN
static inline void wr_reg64(u64 __iomem *reg, u64 data)
{
wr_reg32((u32 __iomem *)reg, (data & 0xffffffff00000000ull) >> 32);
@@ -95,6 +96,21 @@ static inline u64 rd_reg64(u64 __iomem *reg)
return (((u64)rd_reg32((u32 __iomem *)reg)) << 32) |
((u64)rd_reg32((u32 __iomem *)reg + 1));
}
+#else
+#ifdef __LITTLE_ENDIAN
+static inline void wr_reg64(u64 __iomem *reg, u64 data)
+{
+ wr_reg32((u32 __iomem *)reg + 1, (data & 0xffffffff00000000ull) >> 32);
+ wr_reg32((u32 __iomem *)reg, data & 0x00000000ffffffffull);
+}
+
+static inline u64 rd_reg64(u64 __iomem *reg)
+{
+ return (((u64)rd_reg32((u32 __iomem *)reg + 1)) << 32) |
+ ((u64)rd_reg32((u32 __iomem *)reg));
+}
+#endif
+#endif
#endif
/*
@@ -114,45 +130,45 @@ struct jr_outentry {
*/
/* Number of DECOs */
-#define CHA_NUM_DECONUM_SHIFT 56
-#define CHA_NUM_DECONUM_MASK (0xfull << CHA_NUM_DECONUM_SHIFT)
+#define CHA_NUM_MS_DECONUM_SHIFT 24
+#define CHA_NUM_MS_DECONUM_MASK (0xfull << CHA_NUM_MS_DECONUM_SHIFT)
/* CHA Version IDs */
-#define CHA_ID_AES_SHIFT 0
-#define CHA_ID_AES_MASK (0xfull << CHA_ID_AES_SHIFT)
+#define CHA_ID_LS_AES_SHIFT 0
+#define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT)
-#define CHA_ID_DES_SHIFT 4
-#define CHA_ID_DES_MASK (0xfull << CHA_ID_DES_SHIFT)
+#define CHA_ID_LS_DES_SHIFT 4
+#define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT)
-#define CHA_ID_ARC4_SHIFT 8
-#define CHA_ID_ARC4_MASK (0xfull << CHA_ID_ARC4_SHIFT)
+#define CHA_ID_LS_ARC4_SHIFT 8
+#define CHA_ID_LS_ARC4_MASK (0xfull << CHA_ID_LS_ARC4_SHIFT)
-#define CHA_ID_MD_SHIFT 12
-#define CHA_ID_MD_MASK (0xfull << CHA_ID_MD_SHIFT)
+#define CHA_ID_LS_MD_SHIFT 12
+#define CHA_ID_LS_MD_MASK (0xfull << CHA_ID_LS_MD_SHIFT)
-#define CHA_ID_RNG_SHIFT 16
-#define CHA_ID_RNG_MASK (0xfull << CHA_ID_RNG_SHIFT)
+#define CHA_ID_LS_RNG_SHIFT 16
+#define CHA_ID_LS_RNG_MASK (0xfull << CHA_ID_LS_RNG_SHIFT)
-#define CHA_ID_SNW8_SHIFT 20
-#define CHA_ID_SNW8_MASK (0xfull << CHA_ID_SNW8_SHIFT)
+#define CHA_ID_LS_SNW8_SHIFT 20
+#define CHA_ID_LS_SNW8_MASK (0xfull << CHA_ID_LS_SNW8_SHIFT)
-#define CHA_ID_KAS_SHIFT 24
-#define CHA_ID_KAS_MASK (0xfull << CHA_ID_KAS_SHIFT)
+#define CHA_ID_LS_KAS_SHIFT 24
+#define CHA_ID_LS_KAS_MASK (0xfull << CHA_ID_LS_KAS_SHIFT)
-#define CHA_ID_PK_SHIFT 28
-#define CHA_ID_PK_MASK (0xfull << CHA_ID_PK_SHIFT)
+#define CHA_ID_LS_PK_SHIFT 28
+#define CHA_ID_LS_PK_MASK (0xfull << CHA_ID_LS_PK_SHIFT)
-#define CHA_ID_CRC_SHIFT 32
-#define CHA_ID_CRC_MASK (0xfull << CHA_ID_CRC_SHIFT)
+#define CHA_ID_MS_CRC_SHIFT 0
+#define CHA_ID_MS_CRC_MASK (0xfull << CHA_ID_MS_CRC_SHIFT)
-#define CHA_ID_SNW9_SHIFT 36
-#define CHA_ID_SNW9_MASK (0xfull << CHA_ID_SNW9_SHIFT)
+#define CHA_ID_MS_SNW9_SHIFT 4
+#define CHA_ID_MS_SNW9_MASK (0xfull << CHA_ID_MS_SNW9_SHIFT)
-#define CHA_ID_DECO_SHIFT 56
-#define CHA_ID_DECO_MASK (0xfull << CHA_ID_DECO_SHIFT)
+#define CHA_ID_MS_DECO_SHIFT 24
+#define CHA_ID_MS_DECO_MASK (0xfull << CHA_ID_MS_DECO_SHIFT)
-#define CHA_ID_JR_SHIFT 60
-#define CHA_ID_JR_MASK (0xfull << CHA_ID_JR_SHIFT)
+#define CHA_ID_MS_JR_SHIFT 28
+#define CHA_ID_MS_JR_MASK (0xfull << CHA_ID_MS_JR_SHIFT)
struct sec_vid {
u16 ip_id;
@@ -172,10 +188,14 @@ struct caam_perfmon {
u64 rsvd[13];
/* CAAM Hardware Instantiation Parameters fa0-fbf */
- u64 cha_rev; /* CRNR - CHA Revision Number */
-#define CTPR_QI_SHIFT 57
-#define CTPR_QI_MASK (0x1ull << CTPR_QI_SHIFT)
- u64 comp_parms; /* CTPR - Compile Parameters Register */
+ u32 cha_rev_ms; /* CRNR - CHA Rev No. Most significant half*/
+ u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/
+#define CTPR_MS_QI_SHIFT 25
+#define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT)
+#define CTPR_MS_VIRT_EN_INCL 0x00000001
+#define CTPR_MS_VIRT_EN_POR 0x00000002
+ u32 comp_parms_ms; /* CTPR - Compile Parameters Register */
+ u32 comp_parms_ls; /* CTPR - Compile Parameters Register */
u64 rsvd1[2];
/* CAAM Global Status fc0-fdf */
@@ -189,9 +209,12 @@ struct caam_perfmon {
/* Component Instantiation Parameters fe0-fff */
u32 rtic_id; /* RVID - RTIC Version ID */
u32 ccb_id; /* CCBVID - CCB Version ID */
- u64 cha_id; /* CHAVID - CHA Version ID */
- u64 cha_num; /* CHANUM - CHA Number */
- u64 caam_id; /* CAAMVID - CAAM Version ID */
+ u32 cha_id_ms; /* CHAVID - CHA Version ID Most Significant*/
+ u32 cha_id_ls; /* CHAVID - CHA Version ID Least Significant*/
+ u32 cha_num_ms; /* CHANUM - CHA Number Most Significant */
+ u32 cha_num_ls; /* CHANUM - CHA Number Least Significant*/
+ u32 caam_id_ms; /* CAAMVID - CAAM Version ID MS */
+ u32 caam_id_ls; /* CAAMVID - CAAM Version ID LS */
};
/* LIODN programming for DMA configuration */
@@ -304,9 +327,12 @@ struct caam_ctrl {
/* Bus Access Configuration Section 010-11f */
/* Read/Writable */
struct masterid jr_mid[4]; /* JRxLIODNR - JobR LIODN setup */
- u32 rsvd3[12];
+ u32 rsvd3[11];
+ u32 jrstart; /* JRSTART - Job Ring Start Register */
struct masterid rtic_mid[4]; /* RTICxLIODNR - RTIC LIODN setup */
- u32 rsvd4[7];
+ u32 rsvd4[5];
+ u32 deco_rsr; /* DECORSR - Deco Request Source */
+ u32 rsvd11;
u32 deco_rq; /* DECORR - DECO Request */
struct partid deco_mid[5]; /* DECOxLIODNR - 1 per DECO */
u32 rsvd5[22];
@@ -347,7 +373,10 @@ struct caam_ctrl {
#define MCFGR_DMA_RESET 0x10000000
#define MCFGR_LONG_PTR 0x00010000 /* Use >32-bit desc addressing */
#define SCFGR_RDBENABLE 0x00000400
+#define SCFGR_VIRT_EN 0x00008000
#define DECORR_RQD0ENABLE 0x00000001 /* Enable DECO0 for direct access */
+#define DECORSR_JR0 0x00000001 /* JR to supply TZ, SDID, ICID */
+#define DECORSR_VALID 0x80000000
#define DECORR_DEN0 0x00010000 /* DECO0 available for access*/
/* AXI read cache control */
@@ -365,6 +394,12 @@ struct caam_ctrl {
#define MCFGR_AXIPRI 0x00000008 /* Assert AXI priority sideband */
#define MCFGR_BURST_64 0x00000001 /* Max burst size */
+/* JRSTART register offsets */
+#define JRSTART_JR0_START 0x00000001 /* Start Job ring 0 */
+#define JRSTART_JR1_START 0x00000002 /* Start Job ring 1 */
+#define JRSTART_JR2_START 0x00000004 /* Start Job ring 2 */
+#define JRSTART_JR3_START 0x00000008 /* Start Job ring 3 */
+
/*
* caam_job_ring - direct job ring setup
* 1-4 possible per instantiation, base + 1000/2000/3000/4000
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index d3505a018720..7f592d8d07bb 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -1,6 +1,11 @@
obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
ccp-objs := ccp-dev.o ccp-ops.o
+ifdef CONFIG_X86
ccp-objs += ccp-pci.o
+endif
+ifdef CONFIG_ARM64
+ccp-objs += ccp-platform.o
+endif
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
ccp-crypto-objs := ccp-crypto-main.o \
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 2c7816149b01..a7d110652a74 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -20,7 +20,9 @@
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
+#ifdef CONFIG_X86
#include <asm/cpu_device_id.h>
+#endif
#include <linux/ccp.h>
#include "ccp-dev.h"
@@ -360,6 +362,12 @@ int ccp_init(struct ccp_device *ccp)
/* Build queue interrupt mask (two interrupts per queue) */
qim |= cmd_q->int_ok | cmd_q->int_err;
+#ifdef CONFIG_ARM64
+ /* For arm64 set the recommended queue cache settings */
+ iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
+ (CMD_Q_CACHE_INC * i));
+#endif
+
dev_dbg(dev, "queue #%u available\n", i);
}
if (ccp->cmd_q_count == 0) {
@@ -558,12 +566,15 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
}
#endif
+#ifdef CONFIG_X86
static const struct x86_cpu_id ccp_support[] = {
{ X86_VENDOR_AMD, 22, },
};
+#endif
static int __init ccp_mod_init(void)
{
+#ifdef CONFIG_X86
struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
int ret;
@@ -589,12 +600,30 @@ static int __init ccp_mod_init(void)
break;
}
+#endif
+
+#ifdef CONFIG_ARM64
+ int ret;
+
+ ret = ccp_platform_init();
+ if (ret)
+ return ret;
+
+ /* Don't leave the driver loaded if init failed */
+ if (!ccp_get_device()) {
+ ccp_platform_exit();
+ return -ENODEV;
+ }
+
+ return 0;
+#endif
return -ENODEV;
}
static void __exit ccp_mod_exit(void)
{
+#ifdef CONFIG_X86
struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
switch (cpuinfo->x86) {
@@ -602,6 +631,11 @@ static void __exit ccp_mod_exit(void)
ccp_pci_exit();
break;
}
+#endif
+
+#ifdef CONFIG_ARM64
+ ccp_platform_exit();
+#endif
}
module_init(ccp_mod_init);
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 7ec536e702ec..62ff35a6b9ec 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -23,8 +23,6 @@
#include <linux/hw_random.h>
-#define IO_OFFSET 0x20000
-
#define MAX_DMAPOOL_NAME_LEN 32
#define MAX_HW_QUEUES 5
@@ -32,6 +30,9 @@
#define TRNG_RETRIES 10
+#define CACHE_NONE 0x00
+#define CACHE_WB_NO_ALLOC 0xb7
+
/****** Register Mappings ******/
#define Q_MASK_REG 0x000
@@ -50,7 +51,7 @@
#define CMD_Q_INT_STATUS_BASE 0x214
#define CMD_Q_STATUS_INCR 0x20
-#define CMD_Q_CACHE 0x228
+#define CMD_Q_CACHE_BASE 0x228
#define CMD_Q_CACHE_INC 0x20
#define CMD_Q_ERROR(__qs) ((__qs) & 0x0000003f);
@@ -194,6 +195,7 @@ struct ccp_device {
void *dev_specific;
int (*get_irq)(struct ccp_device *ccp);
void (*free_irq)(struct ccp_device *ccp);
+ unsigned int irq;
/*
* I/O area used for device communication. The register mapping
@@ -254,12 +256,18 @@ struct ccp_device {
/* Suspend support */
unsigned int suspending;
wait_queue_head_t suspend_queue;
+
+ /* DMA caching attribute support */
+ unsigned int axcache;
};
int ccp_pci_init(void);
void ccp_pci_exit(void);
+int ccp_platform_init(void);
+void ccp_platform_exit(void);
+
struct ccp_device *ccp_alloc_struct(struct device *dev);
int ccp_init(struct ccp_device *ccp);
void ccp_destroy(struct ccp_device *ccp);
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 9ae006d69df4..8729364261d7 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1606,7 +1606,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
goto e_ksb;
ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len, CCP_KSB_BYTES,
- true);
+ false);
ret = ccp_copy_to_ksb(cmd_q, &exp, op.jobid, op.ksb_key,
CCP_PASSTHRU_BYTESWAP_NOOP);
if (ret) {
@@ -1623,10 +1623,10 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
goto e_exp;
ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len, CCP_KSB_BYTES,
- true);
+ false);
src.address += o_len; /* Adjust the address for the copy operation */
ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len, CCP_KSB_BYTES,
- true);
+ false);
src.address -= o_len; /* Reset the address to original value */
/* Prepare the output area for the operation */
@@ -1841,20 +1841,20 @@ static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
/* Copy the ECC modulus */
ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
- CCP_ECC_OPERAND_SIZE, true);
+ CCP_ECC_OPERAND_SIZE, false);
src.address += CCP_ECC_OPERAND_SIZE;
/* Copy the first operand */
ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1,
ecc->u.mm.operand_1_len,
- CCP_ECC_OPERAND_SIZE, true);
+ CCP_ECC_OPERAND_SIZE, false);
src.address += CCP_ECC_OPERAND_SIZE;
if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
/* Copy the second operand */
ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2,
ecc->u.mm.operand_2_len,
- CCP_ECC_OPERAND_SIZE, true);
+ CCP_ECC_OPERAND_SIZE, false);
src.address += CCP_ECC_OPERAND_SIZE;
}
@@ -1960,17 +1960,17 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
/* Copy the ECC modulus */
ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
- CCP_ECC_OPERAND_SIZE, true);
+ CCP_ECC_OPERAND_SIZE, false);
src.address += CCP_ECC_OPERAND_SIZE;
/* Copy the first point X and Y coordinate */
ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x,
ecc->u.pm.point_1.x_len,
- CCP_ECC_OPERAND_SIZE, true);
+ CCP_ECC_OPERAND_SIZE, false);
src.address += CCP_ECC_OPERAND_SIZE;
ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y,
ecc->u.pm.point_1.y_len,
- CCP_ECC_OPERAND_SIZE, true);
+ CCP_ECC_OPERAND_SIZE, false);
src.address += CCP_ECC_OPERAND_SIZE;
/* Set the first point Z coordianate to 1 */
@@ -1981,11 +1981,11 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
/* Copy the second point X and Y coordinate */
ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x,
ecc->u.pm.point_2.x_len,
- CCP_ECC_OPERAND_SIZE, true);
+ CCP_ECC_OPERAND_SIZE, false);
src.address += CCP_ECC_OPERAND_SIZE;
ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y,
ecc->u.pm.point_2.y_len,
- CCP_ECC_OPERAND_SIZE, true);
+ CCP_ECC_OPERAND_SIZE, false);
src.address += CCP_ECC_OPERAND_SIZE;
/* Set the second point Z coordianate to 1 */
@@ -1995,14 +1995,14 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
/* Copy the Domain "a" parameter */
ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a,
ecc->u.pm.domain_a_len,
- CCP_ECC_OPERAND_SIZE, true);
+ CCP_ECC_OPERAND_SIZE, false);
src.address += CCP_ECC_OPERAND_SIZE;
if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
/* Copy the scalar value */
ccp_reverse_set_dm_area(&src, ecc->u.pm.scalar,
ecc->u.pm.scalar_len,
- CCP_ECC_OPERAND_SIZE, true);
+ CCP_ECC_OPERAND_SIZE, false);
src.address += CCP_ECC_OPERAND_SIZE;
}
}
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
index 0d746236df5e..180cc87b4dbb 100644
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -12,8 +12,10 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/device.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
+#include <linux/dma-mapping.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
@@ -24,6 +26,8 @@
#include "ccp-dev.h"
#define IO_BAR 2
+#define IO_OFFSET 0x20000
+
#define MSIX_VECTORS 2
struct ccp_msix {
@@ -89,7 +93,8 @@ static int ccp_get_msi_irq(struct ccp_device *ccp)
if (ret)
return ret;
- ret = request_irq(pdev->irq, ccp_irq_handler, 0, "ccp", dev);
+ ccp->irq = pdev->irq;
+ ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev);
if (ret) {
dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
goto e_msi;
@@ -136,7 +141,7 @@ static void ccp_free_irqs(struct ccp_device *ccp)
dev);
pci_disable_msix(pdev);
} else {
- free_irq(pdev->irq, dev);
+ free_irq(ccp->irq, dev);
pci_disable_msi(pdev);
}
}
@@ -147,21 +152,12 @@ static int ccp_find_mmio_area(struct ccp_device *ccp)
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
resource_size_t io_len;
unsigned long io_flags;
- int bar;
io_flags = pci_resource_flags(pdev, IO_BAR);
io_len = pci_resource_len(pdev, IO_BAR);
if ((io_flags & IORESOURCE_MEM) && (io_len >= (IO_OFFSET + 0x800)))
return IO_BAR;
- for (bar = 0; bar < PCI_STD_RESOURCE_END; bar++) {
- io_flags = pci_resource_flags(pdev, bar);
- io_len = pci_resource_len(pdev, bar);
- if ((io_flags & IORESOURCE_MEM) &&
- (io_len >= (IO_OFFSET + 0x800)))
- return bar;
- }
-
return -EIO;
}
@@ -214,20 +210,13 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
ccp->io_regs = ccp->io_map + IO_OFFSET;
- ret = dma_set_mask(dev, DMA_BIT_MASK(48));
- if (ret == 0) {
- ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(48));
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (ret) {
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) {
- dev_err(dev,
- "pci_set_consistent_dma_mask failed (%d)\n",
+ dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
ret);
- goto e_bar0;
- }
- } else {
- ret = dma_set_mask(dev, DMA_BIT_MASK(32));
- if (ret) {
- dev_err(dev, "pci_set_dma_mask failed (%d)\n", ret);
- goto e_bar0;
+ goto e_iomap;
}
}
@@ -235,13 +224,13 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = ccp_init(ccp);
if (ret)
- goto e_bar0;
+ goto e_iomap;
dev_notice(dev, "enabled\n");
return 0;
-e_bar0:
+e_iomap:
pci_iounmap(pdev, ccp->io_map);
e_device:
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c
new file mode 100644
index 000000000000..b0a2806908f1
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-platform.c
@@ -0,0 +1,230 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2014 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include <linux/dma-mapping.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/ccp.h>
+#include <linux/of.h>
+
+#include "ccp-dev.h"
+
+
+static int ccp_get_irq(struct ccp_device *ccp)
+{
+ struct device *dev = ccp->dev;
+ struct platform_device *pdev = container_of(dev,
+ struct platform_device, dev);
+ int ret;
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+
+ ccp->irq = ret;
+ ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev);
+ if (ret) {
+ dev_notice(dev, "unable to allocate IRQ (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ccp_get_irqs(struct ccp_device *ccp)
+{
+ struct device *dev = ccp->dev;
+ int ret;
+
+ ret = ccp_get_irq(ccp);
+ if (!ret)
+ return 0;
+
+ /* Couldn't get an interrupt */
+ dev_notice(dev, "could not enable interrupts (%d)\n", ret);
+
+ return ret;
+}
+
+static void ccp_free_irqs(struct ccp_device *ccp)
+{
+ struct device *dev = ccp->dev;
+
+ free_irq(ccp->irq, dev);
+}
+
+static struct resource *ccp_find_mmio_area(struct ccp_device *ccp)
+{
+ struct device *dev = ccp->dev;
+ struct platform_device *pdev = container_of(dev,
+ struct platform_device, dev);
+ struct resource *ior;
+
+ ior = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (ior && (resource_size(ior) >= 0x800))
+ return ior;
+
+ return NULL;
+}
+
+static int ccp_platform_probe(struct platform_device *pdev)
+{
+ struct ccp_device *ccp;
+ struct device *dev = &pdev->dev;
+ struct resource *ior;
+ int ret;
+
+ ret = -ENOMEM;
+ ccp = ccp_alloc_struct(dev);
+ if (!ccp)
+ goto e_err;
+
+ ccp->dev_specific = NULL;
+ ccp->get_irq = ccp_get_irqs;
+ ccp->free_irq = ccp_free_irqs;
+
+ ior = ccp_find_mmio_area(ccp);
+ ccp->io_map = devm_ioremap_resource(dev, ior);
+ if (IS_ERR(ccp->io_map)) {
+ ret = PTR_ERR(ccp->io_map);
+ goto e_free;
+ }
+ ccp->io_regs = ccp->io_map;
+
+ if (!dev->dma_mask)
+ dev->dma_mask = &dev->coherent_dma_mask;
+ *(dev->dma_mask) = DMA_BIT_MASK(48);
+ dev->coherent_dma_mask = DMA_BIT_MASK(48);
+
+ if (of_property_read_bool(dev->of_node, "dma-coherent"))
+ ccp->axcache = CACHE_WB_NO_ALLOC;
+ else
+ ccp->axcache = CACHE_NONE;
+
+ dev_set_drvdata(dev, ccp);
+
+ ret = ccp_init(ccp);
+ if (ret)
+ goto e_free;
+
+ dev_notice(dev, "enabled\n");
+
+ return 0;
+
+e_free:
+ kfree(ccp);
+
+e_err:
+ dev_notice(dev, "initialization failed\n");
+ return ret;
+}
+
+static int ccp_platform_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ccp_device *ccp = dev_get_drvdata(dev);
+
+ ccp_destroy(ccp);
+
+ kfree(ccp);
+
+ dev_notice(dev, "disabled\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ccp_platform_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct device *dev = &pdev->dev;
+ struct ccp_device *ccp = dev_get_drvdata(dev);
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+ ccp->suspending = 1;
+
+ /* Wake all the queue kthreads to prepare for suspend */
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ wake_up_process(ccp->cmd_q[i].kthread);
+
+ spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+ /* Wait for all queue kthreads to say they're done */
+ while (!ccp_queues_suspended(ccp))
+ wait_event_interruptible(ccp->suspend_queue,
+ ccp_queues_suspended(ccp));
+
+ return 0;
+}
+
+static int ccp_platform_resume(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ccp_device *ccp = dev_get_drvdata(dev);
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+ ccp->suspending = 0;
+
+ /* Wake up all the kthreads */
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ ccp->cmd_q[i].suspended = 0;
+ wake_up_process(ccp->cmd_q[i].kthread);
+ }
+
+ spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+ return 0;
+}
+#endif
+
+static const struct of_device_id ccp_platform_ids[] = {
+ { .compatible = "amd,ccp-seattle-v1a" },
+ { },
+};
+
+static struct platform_driver ccp_platform_driver = {
+ .driver = {
+ .name = "AMD Cryptographic Coprocessor",
+ .owner = THIS_MODULE,
+ .of_match_table = ccp_platform_ids,
+ },
+ .probe = ccp_platform_probe,
+ .remove = ccp_platform_remove,
+#ifdef CONFIG_PM
+ .suspend = ccp_platform_suspend,
+ .resume = ccp_platform_resume,
+#endif
+};
+
+int ccp_platform_init(void)
+{
+ return platform_driver_register(&ccp_platform_driver);
+}
+
+void ccp_platform_exit(void)
+{
+ platform_driver_unregister(&ccp_platform_driver);
+}
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 502edf0a2933..544f6d327ede 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -1247,7 +1247,7 @@ static struct vio_device_id nx842_driver_ids[] = {
static struct vio_driver nx842_driver = {
.name = MODULE_NAME,
.probe = nx842_probe,
- .remove = nx842_remove,
+ .remove = __exit_p(nx842_remove),
.get_desired_dma = nx842_get_desired_dma,
.id_table = nx842_driver_ids,
};
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig
new file mode 100644
index 000000000000..49bede2a9f77
--- /dev/null
+++ b/drivers/crypto/qat/Kconfig
@@ -0,0 +1,23 @@
+config CRYPTO_DEV_QAT
+ tristate
+ select CRYPTO_AEAD
+ select CRYPTO_AUTHENC
+ select CRYPTO_ALGAPI
+ select CRYPTO_AES
+ select CRYPTO_CBC
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select FW_LOADER
+
+config CRYPTO_DEV_QAT_DH895xCC
+ tristate "Support for Intel(R) DH895xCC"
+ depends on X86 && PCI
+ default n
+ select CRYPTO_DEV_QAT
+ help
+ Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology
+ for accelerating crypto and compression workloads.
+
+ To compile this as a module, choose M here: the module
+ will be called qat_dh895xcc.
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile
new file mode 100644
index 000000000000..d11481be225e
--- /dev/null
+++ b/drivers/crypto/qat/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
new file mode 100644
index 000000000000..e0424dc382fe
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/Makefile
@@ -0,0 +1,14 @@
+obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
+intel_qat-objs := adf_cfg.o \
+ adf_ctl_drv.o \
+ adf_dev_mgr.o \
+ adf_init.o \
+ adf_accel_engine.o \
+ adf_aer.o \
+ adf_transport.o \
+ qat_crypto.o \
+ qat_algs.o \
+ qat_uclo.o \
+ qat_hal.o
+
+intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
new file mode 100644
index 000000000000..9282381b03ce
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -0,0 +1,205 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_ACCEL_DEVICES_H_
+#define ADF_ACCEL_DEVICES_H_
+#include <linux/module.h>
+#include <linux/atomic.h>
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/io.h>
+#include "adf_cfg_common.h"
+
+#define PCI_VENDOR_ID_INTEL 0x8086
+#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
+#define ADF_DH895XCC_PCI_DEVICE_ID 0x435
+#define ADF_DH895XCC_PMISC_BAR 1
+#define ADF_DH895XCC_ETR_BAR 2
+#define ADF_PCI_MAX_BARS 3
+#define ADF_DEVICE_NAME_LENGTH 32
+#define ADF_ETR_MAX_RINGS_PER_BANK 16
+#define ADF_MAX_MSIX_VECTOR_NAME 16
+#define ADF_DEVICE_NAME_PREFIX "qat_"
+
+enum adf_accel_capabilities {
+ ADF_ACCEL_CAPABILITIES_NULL = 0,
+ ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1,
+ ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2,
+ ADF_ACCEL_CAPABILITIES_CIPHER = 4,
+ ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8,
+ ADF_ACCEL_CAPABILITIES_COMPRESSION = 32,
+ ADF_ACCEL_CAPABILITIES_LZS_COMPRESSION = 64,
+ ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
+};
+
+struct adf_bar {
+ resource_size_t base_addr;
+ void __iomem *virt_addr;
+ resource_size_t size;
+} __packed;
+
+struct adf_accel_msix {
+ struct msix_entry *entries;
+ char **names;
+} __packed;
+
+struct adf_accel_pci {
+ struct pci_dev *pci_dev;
+ struct adf_accel_msix msix_entries;
+ struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
+ uint8_t revid;
+ uint8_t sku;
+} __packed;
+
+enum dev_state {
+ DEV_DOWN = 0,
+ DEV_UP
+};
+
+enum dev_sku_info {
+ DEV_SKU_1 = 0,
+ DEV_SKU_2,
+ DEV_SKU_3,
+ DEV_SKU_4,
+ DEV_SKU_UNKNOWN,
+};
+
+static inline const char *get_sku_info(enum dev_sku_info info)
+{
+ switch (info) {
+ case DEV_SKU_1:
+ return "SKU1";
+ case DEV_SKU_2:
+ return "SKU2";
+ case DEV_SKU_3:
+ return "SKU3";
+ case DEV_SKU_4:
+ return "SKU4";
+ case DEV_SKU_UNKNOWN:
+ default:
+ break;
+ }
+ return "Unknown SKU";
+}
+
+struct adf_hw_device_class {
+ const char *name;
+ const enum adf_device_type type;
+ uint32_t instances;
+} __packed;
+
+struct adf_cfg_device_data;
+struct adf_accel_dev;
+struct adf_etr_data;
+struct adf_etr_ring_data;
+
+struct adf_hw_device_data {
+ struct adf_hw_device_class *dev_class;
+ uint32_t (*get_accel_mask)(uint32_t fuse);
+ uint32_t (*get_ae_mask)(uint32_t fuse);
+ uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self);
+ uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self);
+ uint32_t (*get_num_aes)(struct adf_hw_device_data *self);
+ uint32_t (*get_num_accels)(struct adf_hw_device_data *self);
+ enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
+ void (*hw_arb_ring_enable)(struct adf_etr_ring_data *ring);
+ void (*hw_arb_ring_disable)(struct adf_etr_ring_data *ring);
+ int (*alloc_irq)(struct adf_accel_dev *accel_dev);
+ void (*free_irq)(struct adf_accel_dev *accel_dev);
+ void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
+ const char *fw_name;
+ uint32_t pci_dev_id;
+ uint32_t fuses;
+ uint32_t accel_capabilities_mask;
+ uint16_t accel_mask;
+ uint16_t ae_mask;
+ uint16_t tx_rings_mask;
+ uint8_t tx_rx_gap;
+ uint8_t instance_id;
+ uint8_t num_banks;
+ uint8_t num_accel;
+ uint8_t num_logical_accel;
+ uint8_t num_engines;
+} __packed;
+
+/* CSR write macro */
+#define ADF_CSR_WR(csr_base, csr_offset, val) \
+ __raw_writel(val, csr_base + csr_offset)
+
+/* CSR read macro */
+#define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset)
+
+#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev)
+#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
+#define GET_HW_DATA(accel_dev) (accel_dev->hw_device)
+#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks)
+#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
+#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
+
+struct adf_admin_comms;
+struct icp_qat_fw_loader_handle;
+struct adf_fw_loader_data {
+ struct icp_qat_fw_loader_handle *fw_loader;
+ const struct firmware *uof_fw;
+};
+
+struct adf_accel_dev {
+ struct adf_etr_data *transport;
+ struct adf_hw_device_data *hw_device;
+ struct adf_cfg_device_data *cfg;
+ struct adf_fw_loader_data *fw_loader;
+ struct adf_admin_comms *admin;
+ struct list_head crypto_list;
+ unsigned long status;
+ atomic_t ref_count;
+ struct dentry *debugfs_dir;
+ struct list_head list;
+ struct module *owner;
+ uint8_t accel_id;
+ uint8_t numa_node;
+ struct adf_accel_pci accel_pci_dev;
+} __packed;
+#endif
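
The ADF_CSR_WR/ADF_CSR_RD macros and the GET_* helpers above are how the rest of the driver reaches device registers through an ioremapped BAR. A minimal sketch of a caller follows; the register offset and function name are hypothetical and used only for illustration:

#define ADF_EXAMPLE_REG_OFFSET 0x100	/* hypothetical offset */

static void adf_example_write_reg(struct adf_accel_dev *accel_dev, u32 val)
{
        struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
        struct adf_bar *misc_bar =
                &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];

        /* CSR accesses always go through the BAR's ioremapped address. */
        ADF_CSR_WR(misc_bar->virt_addr, ADF_EXAMPLE_REG_OFFSET, val);
}
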
diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c
new file mode 100644
index 000000000000..c77453b900a3
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_accel_engine.c
@@ -0,0 +1,168 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include "adf_cfg.h"
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_uclo.h"
+
+int adf_ae_fw_load(struct adf_accel_dev *accel_dev)
+{
+ struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ void *uof_addr;
+ uint32_t uof_size;
+
+ if (request_firmware(&loader_data->uof_fw, hw_device->fw_name,
+ &accel_dev->accel_pci_dev.pci_dev->dev)) {
+ pr_err("QAT: Failed to load firmware %s\n", hw_device->fw_name);
+ return -EFAULT;
+ }
+
+ uof_size = loader_data->uof_fw->size;
+ uof_addr = (void *)loader_data->uof_fw->data;
+ if (qat_uclo_map_uof_obj(loader_data->fw_loader, uof_addr, uof_size)) {
+ pr_err("QAT: Failed to map UOF\n");
+ goto out_err;
+ }
+ if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) {
+ pr_err("QAT: Failed to map UOF\n");
+ goto out_err;
+ }
+ return 0;
+
+out_err:
+ release_firmware(loader_data->uof_fw);
+ return -EFAULT;
+}
+
+int adf_ae_fw_release(struct adf_accel_dev *accel_dev)
+{
+ struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+
+ release_firmware(loader_data->uof_fw);
+ qat_uclo_del_uof_obj(loader_data->fw_loader);
+ qat_hal_deinit(loader_data->fw_loader);
+ loader_data->fw_loader = NULL;
+ return 0;
+}
+
+int adf_ae_start(struct adf_accel_dev *accel_dev)
+{
+ struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
+
+ for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
+ if (hw_data->ae_mask & (1 << ae)) {
+ qat_hal_start(loader_data->fw_loader, ae, 0xFF);
+ ae_ctr++;
+ }
+ }
+ pr_info("QAT: qat_dev%d started %d acceleration engines\n",
+ accel_dev->accel_id, ae_ctr);
+ return 0;
+}
+
+int adf_ae_stop(struct adf_accel_dev *accel_dev)
+{
+ struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
+
+ for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
+ if (hw_data->ae_mask & (1 << ae)) {
+ qat_hal_stop(loader_data->fw_loader, ae, 0xFF);
+ ae_ctr++;
+ }
+ }
+ pr_info("QAT: qat_dev%d stopped %d acceleration engines\n",
+ accel_dev->accel_id, ae_ctr);
+ return 0;
+}
+
+static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae)
+{
+ struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+
+ qat_hal_reset(loader_data->fw_loader);
+ if (qat_hal_clr_reset(loader_data->fw_loader))
+ return -EFAULT;
+
+ return 0;
+}
+
+int adf_ae_init(struct adf_accel_dev *accel_dev)
+{
+ struct adf_fw_loader_data *loader_data;
+
+ loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL);
+ if (!loader_data)
+ return -ENOMEM;
+
+ accel_dev->fw_loader = loader_data;
+ if (qat_hal_init(accel_dev)) {
+ pr_err("QAT: Failed to init the AEs\n");
+ kfree(loader_data);
+ return -EFAULT;
+ }
+ if (adf_ae_reset(accel_dev, 0)) {
+ pr_err("QAT: Failed to reset the AEs\n");
+ qat_hal_deinit(loader_data->fw_loader);
+ kfree(loader_data);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+int adf_ae_shutdown(struct adf_accel_dev *accel_dev)
+{
+ kfree(accel_dev->fw_loader);
+ accel_dev->fw_loader = NULL;
+ return 0;
+}
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
new file mode 100644
index 000000000000..c29d4c3926bf
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -0,0 +1,259 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+
+static struct workqueue_struct *device_reset_wq;
+
+static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ pr_info("QAT: Acceleration driver hardware error detected.\n");
+ if (!accel_dev) {
+ pr_err("QAT: Can't find acceleration device\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ if (state == pci_channel_io_perm_failure) {
+ pr_err("QAT: Can't recover from device error\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/* reset dev data */
+struct adf_reset_dev_data {
+ int mode;
+ struct adf_accel_dev *accel_dev;
+ struct completion compl;
+ struct work_struct reset_work;
+};
+
+#define PPDSTAT_OFFSET 0x7E
+static void adf_dev_restore(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+ struct pci_dev *parent = pdev->bus->self;
+ uint16_t ppdstat = 0, bridge_ctl = 0;
+ int pending = 0;
+
+ pr_info("QAT: Reseting device qat_dev%d\n", accel_dev->accel_id);
+ pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat);
+ pending = ppdstat & PCI_EXP_DEVSTA_TRPND;
+ if (pending) {
+ int ctr = 0;
+
+ do {
+ msleep(100);
+ pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat);
+ pending = ppdstat & PCI_EXP_DEVSTA_TRPND;
+ } while (pending && ctr++ < 10);
+ }
+
+ if (pending)
+ pr_info("QAT: Transaction still in progress. Proceeding\n");
+
+ pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl);
+ bridge_ctl |= PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
+ msleep(100);
+ bridge_ctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
+ msleep(100);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+}
+
+static void adf_device_reset_worker(struct work_struct *work)
+{
+ struct adf_reset_dev_data *reset_data =
+ container_of(work, struct adf_reset_dev_data, reset_work);
+ struct adf_accel_dev *accel_dev = reset_data->accel_dev;
+
+ adf_dev_restarting_notify(accel_dev);
+ adf_dev_stop(accel_dev);
+ adf_dev_restore(accel_dev);
+ if (adf_dev_start(accel_dev)) {
+ /* The device hung and we can't restart it, so stop here */
+ dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
+ kfree(reset_data);
+ WARN(1, "QAT: device restart failed. Device is unusable\n");
+ return;
+ }
+ adf_dev_restarted_notify(accel_dev);
+ clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+
+ /* The dev is back alive. Notify the caller if in sync mode */
+ if (reset_data->mode == ADF_DEV_RESET_SYNC)
+ complete(&reset_data->compl);
+ else
+ kfree(reset_data);
+}
+
+static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
+ enum adf_dev_reset_mode mode)
+{
+ struct adf_reset_dev_data *reset_data;
+
+ if (adf_dev_started(accel_dev) &&
+ !test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
+ return 0;
+
+ set_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+ reset_data = kzalloc(sizeof(*reset_data), GFP_ATOMIC);
+ if (!reset_data)
+ return -ENOMEM;
+ reset_data->accel_dev = accel_dev;
+ init_completion(&reset_data->compl);
+ reset_data->mode = mode;
+ INIT_WORK(&reset_data->reset_work, adf_device_reset_worker);
+ queue_work(device_reset_wq, &reset_data->reset_work);
+
+ /* If in sync mode wait for the result */
+ if (mode == ADF_DEV_RESET_SYNC) {
+ int ret = 0;
+ /* Maximum device reset time is 10 seconds */
+ unsigned long wait_jiffies = msecs_to_jiffies(10000);
+ unsigned long timeout = wait_for_completion_timeout(
+ &reset_data->compl, wait_jiffies);
+ if (!timeout) {
+ pr_err("QAT: Reset device timeout expired\n");
+ ret = -EFAULT;
+ }
+ kfree(reset_data);
+ return ret;
+ }
+ return 0;
+}
+
+static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ if (!accel_dev) {
+ pr_err("QAT: Can't find acceleration device\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC))
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void adf_resume(struct pci_dev *pdev)
+{
+ pr_info("QAT: Acceleration driver reset completed\n");
+ pr_info("QAT: Device is up and runnig\n");
+}
+
+static struct pci_error_handlers adf_err_handler = {
+ .error_detected = adf_error_detected,
+ .slot_reset = adf_slot_reset,
+ .resume = adf_resume,
+};
+
+/**
+ * adf_enable_aer() - Enable Advanced Error Reporting for acceleration device
+ * @accel_dev: Pointer to acceleration device.
+ * @adf: PCI device driver owning the given acceleration device.
+ *
+ * Function enables PCI Advanced Error Reporting for the
+ * QAT acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+ adf->err_handler = &adf_err_handler;
+ pci_enable_pcie_error_reporting(pdev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_enable_aer);
+
+/**
+ * adf_disable_aer() - Disable Advanced Error Reporting for acceleration device
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function disables PCI Advanced Error Reporting for the
+ * QAT acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_disable_aer(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+ pci_disable_pcie_error_reporting(pdev);
+}
+EXPORT_SYMBOL_GPL(adf_disable_aer);
+
+int adf_init_aer(void)
+{
+ device_reset_wq = create_workqueue("qat_device_reset_wq");
+ return (device_reset_wq == NULL) ? -EFAULT : 0;
+}
+
+void adf_exit_aer(void)
+{
+ if (device_reset_wq)
+ destroy_workqueue(device_reset_wq);
+ device_reset_wq = NULL;
+}
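
adf_enable_aer() and adf_disable_aer() are meant to be called from the device-specific PCI driver that owns the accelerator. A minimal sketch of those call sites, with the driver and function names hypothetical:

static struct pci_driver adf_example_driver;	/* hypothetical device driver */

static int adf_example_setup_aer(struct adf_accel_dev *accel_dev)
{
        /* Hooks the shared error handlers into the driver and enables AER. */
        return adf_enable_aer(accel_dev, &adf_example_driver);
}

static void adf_example_teardown_aer(struct adf_accel_dev *accel_dev)
{
        adf_disable_aer(accel_dev);
}
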
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
new file mode 100644
index 000000000000..aba7f1d043fb
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_cfg.c
@@ -0,0 +1,361 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+
+static DEFINE_MUTEX(qat_cfg_read_lock);
+
+static void *qat_dev_cfg_start(struct seq_file *sfile, loff_t *pos)
+{
+ struct adf_cfg_device_data *dev_cfg = sfile->private;
+
+ mutex_lock(&qat_cfg_read_lock);
+ return seq_list_start(&dev_cfg->sec_list, *pos);
+}
+
+static int qat_dev_cfg_show(struct seq_file *sfile, void *v)
+{
+ struct list_head *list;
+ struct adf_cfg_section *sec =
+ list_entry(v, struct adf_cfg_section, list);
+
+ seq_printf(sfile, "[%s]\n", sec->name);
+ list_for_each(list, &sec->param_head) {
+ struct adf_cfg_key_val *ptr =
+ list_entry(list, struct adf_cfg_key_val, list);
+ seq_printf(sfile, "%s = %s\n", ptr->key, ptr->val);
+ }
+ return 0;
+}
+
+static void *qat_dev_cfg_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+ struct adf_cfg_device_data *dev_cfg = sfile->private;
+
+ return seq_list_next(v, &dev_cfg->sec_list, pos);
+}
+
+static void qat_dev_cfg_stop(struct seq_file *sfile, void *v)
+{
+ mutex_unlock(&qat_cfg_read_lock);
+}
+
+static const struct seq_operations qat_dev_cfg_sops = {
+ .start = qat_dev_cfg_start,
+ .next = qat_dev_cfg_next,
+ .stop = qat_dev_cfg_stop,
+ .show = qat_dev_cfg_show
+};
+
+static int qat_dev_cfg_open(struct inode *inode, struct file *file)
+{
+ int ret = seq_open(file, &qat_dev_cfg_sops);
+
+ if (!ret) {
+ struct seq_file *seq_f = file->private_data;
+
+ seq_f->private = inode->i_private;
+ }
+ return ret;
+}
+
+static const struct file_operations qat_dev_cfg_fops = {
+ .open = qat_dev_cfg_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release
+};
+
+/**
+ * adf_cfg_dev_add() - Create an acceleration device configuration table.
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function creates a configuration table for the given acceleration device.
+ * The table stores device specific config values.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
+{
+ struct adf_cfg_device_data *dev_cfg_data;
+
+ dev_cfg_data = kzalloc(sizeof(*dev_cfg_data), GFP_KERNEL);
+ if (!dev_cfg_data)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&dev_cfg_data->sec_list);
+ init_rwsem(&dev_cfg_data->lock);
+ accel_dev->cfg = dev_cfg_data;
+
+ /* accel_dev->debugfs_dir should always be non-NULL here */
+ dev_cfg_data->debug = debugfs_create_file("dev_cfg", S_IRUSR,
+ accel_dev->debugfs_dir,
+ dev_cfg_data,
+ &qat_dev_cfg_fops);
+ if (!dev_cfg_data->debug) {
+ pr_err("QAT: Failed to create qat cfg debugfs entry.\n");
+ kfree(dev_cfg_data);
+ accel_dev->cfg = NULL;
+ return -EFAULT;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_dev_add);
+
+static void adf_cfg_section_del_all(struct list_head *head);
+
+void adf_cfg_del_all(struct adf_accel_dev *accel_dev)
+{
+ struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+
+ down_write(&dev_cfg_data->lock);
+ adf_cfg_section_del_all(&dev_cfg_data->sec_list);
+ up_write(&dev_cfg_data->lock);
+}
+
+/**
+ * adf_cfg_dev_remove() - Clears acceleration device configuration table.
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function removes configuration table from the given acceleration device
+ * and frees all allocated memory.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev)
+{
+ struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+
+ down_write(&dev_cfg_data->lock);
+ adf_cfg_section_del_all(&dev_cfg_data->sec_list);
+ up_write(&dev_cfg_data->lock);
+ debugfs_remove(dev_cfg_data->debug);
+ kfree(dev_cfg_data);
+ accel_dev->cfg = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_dev_remove);
+
+static void adf_cfg_keyval_add(struct adf_cfg_key_val *new,
+ struct adf_cfg_section *sec)
+{
+ list_add_tail(&new->list, &sec->param_head);
+}
+
+static void adf_cfg_keyval_del_all(struct list_head *head)
+{
+ struct list_head *list_ptr, *tmp;
+
+ list_for_each_prev_safe(list_ptr, tmp, head) {
+ struct adf_cfg_key_val *ptr =
+ list_entry(list_ptr, struct adf_cfg_key_val, list);
+ list_del(list_ptr);
+ kfree(ptr);
+ }
+}
+
+static void adf_cfg_section_del_all(struct list_head *head)
+{
+ struct adf_cfg_section *ptr;
+ struct list_head *list, *tmp;
+
+ list_for_each_prev_safe(list, tmp, head) {
+ ptr = list_entry(list, struct adf_cfg_section, list);
+ adf_cfg_keyval_del_all(&ptr->param_head);
+ list_del(list);
+ kfree(ptr);
+ }
+}
+
+static struct adf_cfg_key_val *adf_cfg_key_value_find(struct adf_cfg_section *s,
+ const char *key)
+{
+ struct list_head *list;
+
+ list_for_each(list, &s->param_head) {
+ struct adf_cfg_key_val *ptr =
+ list_entry(list, struct adf_cfg_key_val, list);
+ if (!strcmp(ptr->key, key))
+ return ptr;
+ }
+ return NULL;
+}
+
+static struct adf_cfg_section *adf_cfg_sec_find(struct adf_accel_dev *accel_dev,
+ const char *sec_name)
+{
+ struct adf_cfg_device_data *cfg = accel_dev->cfg;
+ struct list_head *list;
+
+ list_for_each(list, &cfg->sec_list) {
+ struct adf_cfg_section *ptr =
+ list_entry(list, struct adf_cfg_section, list);
+ if (!strcmp(ptr->name, sec_name))
+ return ptr;
+ }
+ return NULL;
+}
+
+static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev,
+ const char *sec_name,
+ const char *key_name,
+ char *val)
+{
+ struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, sec_name);
+ struct adf_cfg_key_val *keyval = NULL;
+
+ if (sec)
+ keyval = adf_cfg_key_value_find(sec, key_name);
+ if (keyval) {
+ memcpy(val, keyval->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES);
+ return 0;
+ }
+ return -1;
+}
+
+/**
+ * adf_cfg_add_key_value_param() - Add key-value config entry to config table.
+ * @accel_dev: Pointer to acceleration device.
+ * @section_name: Name of the section where the param will be added
+ * @key: The key string
+ * @val: Value for the given @key
+ * @type: Type - string, int or address
+ *
+ * Function adds a key-value configuration entry in the appropriate section
+ * of the given acceleration device's configuration table.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
+ const char *section_name,
+ const char *key, const void *val,
+ enum adf_cfg_val_type type)
+{
+ struct adf_cfg_device_data *cfg = accel_dev->cfg;
+ struct adf_cfg_key_val *key_val;
+ struct adf_cfg_section *section = adf_cfg_sec_find(accel_dev,
+ section_name);
+ if (!section)
+ return -EFAULT;
+
+ key_val = kzalloc(sizeof(*key_val), GFP_KERNEL);
+ if (!key_val)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&key_val->list);
+ strlcpy(key_val->key, key, sizeof(key_val->key));
+
+ if (type == ADF_DEC) {
+ snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
+ "%ld", (*((long *)val)));
+ } else if (type == ADF_STR) {
+ strlcpy(key_val->val, (char *)val, sizeof(key_val->val));
+ } else if (type == ADF_HEX) {
+ snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
+ "0x%lx", (unsigned long)val);
+ } else {
+ pr_err("QAT: Unknown type given.\n");
+ kfree(key_val);
+ return -1;
+ }
+ key_val->type = type;
+ down_write(&cfg->lock);
+ adf_cfg_keyval_add(key_val, section);
+ up_write(&cfg->lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_add_key_value_param);
+
+/**
+ * adf_cfg_section_add() - Add config section entry to config table.
+ * @accel_dev: Pointer to acceleration device.
+ * @name: Name of the section
+ *
+ * Function adds a configuration section where key-value entries
+ * will be stored.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name)
+{
+ struct adf_cfg_device_data *cfg = accel_dev->cfg;
+ struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, name);
+
+ if (sec)
+ return 0;
+
+ sec = kzalloc(sizeof(*sec), GFP_KERNEL);
+ if (!sec)
+ return -ENOMEM;
+
+ strlcpy(sec->name, name, sizeof(sec->name));
+ INIT_LIST_HEAD(&sec->param_head);
+ down_write(&cfg->lock);
+ list_add_tail(&sec->list, &cfg->sec_list);
+ up_write(&cfg->lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_section_add);
+
+int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev,
+ const char *section, const char *name,
+ char *value)
+{
+ struct adf_cfg_device_data *cfg = accel_dev->cfg;
+ int ret;
+
+ down_read(&cfg->lock);
+ ret = adf_cfg_key_val_get(accel_dev, section, name, value);
+ up_read(&cfg->lock);
+ return ret;
+}
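
Taken together, the exported helpers above give a device driver a small configuration API: create the table, add a section, add typed key-value entries, and read values back as strings. A minimal sketch of that sequence; the section and key names are placeholders, not the real configuration strings:

static int adf_example_build_cfg(struct adf_accel_dev *accel_dev)
{
        long num_inst = 2;	/* placeholder value */
        char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
        int ret;

        ret = adf_cfg_dev_add(accel_dev);
        if (ret)
                return ret;

        ret = adf_cfg_section_add(accel_dev, "ExampleSection");
        if (ret)
                return ret;

        /* ADF_DEC values are passed by pointer and stored as decimal strings. */
        ret = adf_cfg_add_key_value_param(accel_dev, "ExampleSection",
                                          "ExampleKey", &num_inst, ADF_DEC);
        if (ret)
                return ret;

        /* Values always come back as strings. */
        return adf_cfg_get_param_value(accel_dev, "ExampleSection",
                                       "ExampleKey", val);
}
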
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.h b/drivers/crypto/qat/qat_common/adf_cfg.h
new file mode 100644
index 000000000000..6a9c6f6b5ec9
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_cfg.h
@@ -0,0 +1,87 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_CFG_H_
+#define ADF_CFG_H_
+
+#include <linux/list.h>
+#include <linux/rwsem.h>
+#include <linux/debugfs.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg_common.h"
+#include "adf_cfg_strings.h"
+
+struct adf_cfg_key_val {
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+ enum adf_cfg_val_type type;
+ struct list_head list;
+};
+
+struct adf_cfg_section {
+ char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
+ struct list_head list;
+ struct list_head param_head;
+};
+
+struct adf_cfg_device_data {
+ struct list_head sec_list;
+ struct dentry *debug;
+ struct rw_semaphore lock;
+};
+
+int adf_cfg_dev_add(struct adf_accel_dev *accel_dev);
+void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev);
+int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name);
+void adf_cfg_del_all(struct adf_accel_dev *accel_dev);
+int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
+ const char *section_name,
+ const char *key, const void *val,
+ enum adf_cfg_val_type type);
+int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev,
+ const char *section, const char *name, char *value);
+
+#endif
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h
new file mode 100644
index 000000000000..88b82187ac35
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_cfg_common.h
@@ -0,0 +1,100 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_CFG_COMMON_H_
+#define ADF_CFG_COMMON_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define ADF_CFG_MAX_STR_LEN 64
+#define ADF_CFG_MAX_KEY_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
+#define ADF_CFG_MAX_VAL_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
+#define ADF_CFG_MAX_SECTION_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
+#define ADF_CFG_BASE_DEC 10
+#define ADF_CFG_BASE_HEX 16
+#define ADF_CFG_ALL_DEVICES 0xFE
+#define ADF_CFG_NO_DEVICE 0xFF
+#define ADF_CFG_AFFINITY_WHATEVER 0xFF
+#define MAX_DEVICE_NAME_SIZE 32
+#define ADF_MAX_DEVICES 32
+
+enum adf_cfg_val_type {
+ ADF_DEC,
+ ADF_HEX,
+ ADF_STR
+};
+
+enum adf_device_type {
+ DEV_UNKNOWN = 0,
+ DEV_DH895XCC,
+};
+
+struct adf_dev_status_info {
+ enum adf_device_type type;
+ uint8_t accel_id;
+ uint8_t instance_id;
+ uint8_t num_ae;
+ uint8_t num_accel;
+ uint8_t num_logical_accel;
+ uint8_t banks_per_accel;
+ uint8_t state;
+ uint8_t bus;
+ uint8_t dev;
+ uint8_t fun;
+ char name[MAX_DEVICE_NAME_SIZE];
+};
+
+#define ADF_CTL_IOC_MAGIC 'a'
+#define IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS _IOW(ADF_CTL_IOC_MAGIC, 0, \
+ struct adf_user_cfg_ctl_data)
+#define IOCTL_STOP_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 1, \
+ struct adf_user_cfg_ctl_data)
+#define IOCTL_START_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 2, \
+ struct adf_user_cfg_ctl_data)
+#define IOCTL_STATUS_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 3, uint32_t)
+#define IOCTL_GET_NUM_DEVICES _IOW(ADF_CTL_IOC_MAGIC, 4, int32_t)
+#endif
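
The ioctls above are issued against the control character device registered by adf_ctl_drv.c (not shown in this hunk). A user-space sketch follows; the device node path is an assumption here, and the authoritative name is whatever adf_ctl_drv.c actually registers:

/* User-space sketch; the /dev node name below is an assumption. */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "adf_cfg_common.h"

int main(void)
{
        int32_t num_devices = 0;
        int fd = open("/dev/qat_adf_ctl", O_RDONLY);

        if (fd < 0)
                return 1;
        if (!ioctl(fd, IOCTL_GET_NUM_DEVICES, &num_devices))
                printf("QAT devices: %d\n", num_devices);
        close(fd);
        return 0;
}
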
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
new file mode 100644
index 000000000000..c7ac758ebc90
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
@@ -0,0 +1,83 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_CFG_STRINGS_H_
+#define ADF_CFG_STRINGS_H_
+
+#define ADF_GENERAL_SEC "GENERAL"
+#define ADF_KERNEL_SEC "KERNEL"
+#define ADF_ACCEL_SEC "Accelerator"
+#define ADF_NUM_CY "NumberCyInstances"
+#define ADF_NUM_DC "NumberDcInstances"
+#define ADF_RING_SYM_SIZE "NumConcurrentSymRequests"
+#define ADF_RING_ASYM_SIZE "NumConcurrentAsymRequests"
+#define ADF_RING_DC_SIZE "NumConcurrentRequests"
+#define ADF_RING_ASYM_TX "RingAsymTx"
+#define ADF_RING_SYM_TX "RingSymTx"
+#define ADF_RING_RND_TX "RingNrbgTx"
+#define ADF_RING_ASYM_RX "RingAsymRx"
+#define ADF_RING_SYM_RX "RingSymRx"
+#define ADF_RING_RND_RX "RingNrbgRx"
+#define ADF_RING_DC_TX "RingTx"
+#define ADF_RING_DC_RX "RingRx"
+#define ADF_ETRMGR_BANK "Bank"
+#define ADF_RING_BANK_NUM "BankNumber"
+#define ADF_CY "Cy"
+#define ADF_DC "Dc"
+#define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled"
+#define ADF_ETRMGR_COALESCING_ENABLED_FORMAT \
+ ADF_ETRMGR_BANK"%d"ADF_ETRMGR_COALESCING_ENABLED
+#define ADF_ETRMGR_COALESCE_TIMER "InterruptCoalescingTimerNs"
+#define ADF_ETRMGR_COALESCE_TIMER_FORMAT \
+ ADF_ETRMGR_BANK"%d"ADF_ETRMGR_COALESCE_TIMER
+#define ADF_ETRMGR_COALESCING_MSG_ENABLED "InterruptCoalescingNumResponses"
+#define ADF_ETRMGR_COALESCING_MSG_ENABLED_FORMAT \
+ ADF_ETRMGR_BANK"%d"ADF_ETRMGR_COALESCING_MSG_ENABLED
+#define ADF_ETRMGR_CORE_AFFINITY "CoreAffinity"
+#define ADF_ETRMGR_CORE_AFFINITY_FORMAT \
+ ADF_ETRMGR_BANK"%d"ADF_ETRMGR_CORE_AFFINITY
+#define ADF_ACCEL_STR "Accelerator%d"
+#endif
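
The *_FORMAT macros simply concatenate the bank prefix with a key name, so per-bank keys can be generated with snprintf(). A small illustrative helper, assuming adf_cfg_common.h for the key-length macro (the function name is hypothetical):

/* Builds the per-bank coalescing-timer key, e.g. "Bank0InterruptCoalescingTimerNs". */
static void adf_example_bank_timer_key(int bank, char *key)
{
        snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES,
                 ADF_ETRMGR_COALESCE_TIMER_FORMAT, bank);
}
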
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_user.h b/drivers/crypto/qat/qat_common/adf_cfg_user.h
new file mode 100644
index 000000000000..0c38a155a865
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_cfg_user.h
@@ -0,0 +1,94 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_CFG_USER_H_
+#define ADF_CFG_USER_H_
+
+#include "adf_cfg_common.h"
+#include "adf_cfg_strings.h"
+
+struct adf_user_cfg_key_val {
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+ union {
+ char *user_val_ptr;
+ uint64_t padding1;
+ };
+ union {
+ struct adf_user_cfg_key_val *prev;
+ uint64_t padding2;
+ };
+ union {
+ struct adf_user_cfg_key_val *next;
+ uint64_t padding3;
+ };
+ enum adf_cfg_val_type type;
+};
+
+struct adf_user_cfg_section {
+ char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
+ union {
+ struct adf_user_cfg_key_val *params;
+ uint64_t padding1;
+ };
+ union {
+ struct adf_user_cfg_section *prev;
+ uint64_t padding2;
+ };
+ union {
+ struct adf_user_cfg_section *next;
+ uint64_t padding3;
+ };
+};
+
+struct adf_user_cfg_ctl_data {
+ union {
+ struct adf_user_cfg_section *config_section;
+ uint64_t padding;
+ };
+ uint8_t device_id;
+};
+#endif
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
new file mode 100644
index 000000000000..5e8f9d431e5d
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -0,0 +1,192 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_DRV_H
+#define ADF_DRV_H
+
+#include <linux/list.h>
+#include <linux/pci.h>
+#include "adf_accel_devices.h"
+#include "icp_qat_fw_loader_handle.h"
+#include "icp_qat_hal.h"
+
+#define ADF_STATUS_RESTARTING 0
+#define ADF_STATUS_STARTING 1
+#define ADF_STATUS_CONFIGURED 2
+#define ADF_STATUS_STARTED 3
+#define ADF_STATUS_AE_INITIALISED 4
+#define ADF_STATUS_AE_UCODE_LOADED 5
+#define ADF_STATUS_AE_STARTED 6
+#define ADF_STATUS_ORPHAN_TH_RUNNING 7
+#define ADF_STATUS_IRQ_ALLOCATED 8
+
+enum adf_dev_reset_mode {
+ ADF_DEV_RESET_ASYNC = 0,
+ ADF_DEV_RESET_SYNC
+};
+
+enum adf_event {
+ ADF_EVENT_INIT = 0,
+ ADF_EVENT_START,
+ ADF_EVENT_STOP,
+ ADF_EVENT_SHUTDOWN,
+ ADF_EVENT_RESTARTING,
+ ADF_EVENT_RESTARTED,
+};
+
+struct service_hndl {
+ int (*event_hld)(struct adf_accel_dev *accel_dev,
+ enum adf_event event);
+ unsigned long init_status;
+ unsigned long start_status;
+ char *name;
+ struct list_head list;
+ int admin;
+};
+
+int adf_service_register(struct service_hndl *service);
+int adf_service_unregister(struct service_hndl *service);
+
+int adf_dev_init(struct adf_accel_dev *accel_dev);
+int adf_dev_start(struct adf_accel_dev *accel_dev);
+int adf_dev_stop(struct adf_accel_dev *accel_dev);
+int adf_dev_shutdown(struct adf_accel_dev *accel_dev);
+
+int adf_ctl_dev_register(void);
+void adf_ctl_dev_unregister(void);
+int adf_processes_dev_register(void);
+void adf_processes_dev_unregister(void);
+
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev);
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev);
+struct list_head *adf_devmgr_get_head(void);
+struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id);
+struct adf_accel_dev *adf_devmgr_get_first(void);
+struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev);
+int adf_devmgr_verify_id(uint32_t id);
+void adf_devmgr_get_num_dev(uint32_t *num);
+int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev);
+int adf_dev_started(struct adf_accel_dev *accel_dev);
+int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev);
+int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev);
+int adf_ae_init(struct adf_accel_dev *accel_dev);
+int adf_ae_shutdown(struct adf_accel_dev *accel_dev);
+int adf_ae_fw_load(struct adf_accel_dev *accel_dev);
+int adf_ae_fw_release(struct adf_accel_dev *accel_dev);
+int adf_ae_start(struct adf_accel_dev *accel_dev);
+int adf_ae_stop(struct adf_accel_dev *accel_dev);
+
+int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf);
+void adf_disable_aer(struct adf_accel_dev *accel_dev);
+int adf_init_aer(void);
+void adf_exit_aer(void);
+
+int adf_dev_get(struct adf_accel_dev *accel_dev);
+void adf_dev_put(struct adf_accel_dev *accel_dev);
+int adf_dev_in_use(struct adf_accel_dev *accel_dev);
+int adf_init_etr_data(struct adf_accel_dev *accel_dev);
+void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev);
+int qat_crypto_register(void);
+int qat_crypto_unregister(void);
+struct qat_crypto_instance *qat_crypto_get_instance_node(int node);
+void qat_crypto_put_instance(struct qat_crypto_instance *inst);
+void qat_alg_callback(void *resp);
+int qat_algs_init(void);
+void qat_algs_exit(void);
+int qat_algs_register(void);
+int qat_algs_unregister(void);
+
+int qat_hal_init(struct adf_accel_dev *accel_dev);
+void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle);
+void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+ unsigned int ctx_mask);
+void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+ unsigned int ctx_mask);
+void qat_hal_reset(struct icp_qat_fw_loader_handle *handle);
+int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle);
+void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int ctx_mask);
+int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, enum icp_qat_uof_regtype lm_type,
+ unsigned char mode);
+int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char mode);
+int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char mode);
+void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int ctx_mask, unsigned int upc);
+void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int uaddr,
+ unsigned int words_num, uint64_t *uword);
+void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+ unsigned int uword_addr, unsigned int words_num,
+ unsigned int *data);
+int qat_hal_get_ins_num(void);
+int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae,
+ struct icp_qat_uof_batch_init *lm_init_header);
+int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char ctx_mask,
+ enum icp_qat_uof_regtype reg_type,
+ unsigned short reg_num, unsigned int regdata);
+int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char ctx_mask,
+ enum icp_qat_uof_regtype reg_type,
+ unsigned short reg_num, unsigned int regdata);
+int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char ctx_mask,
+ enum icp_qat_uof_regtype reg_type,
+ unsigned short reg_num, unsigned int regdata);
+int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char ctx_mask,
+ unsigned short reg_num, unsigned int regdata);
+int qat_hal_wr_lm(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned short lm_addr, unsigned int value);
+int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle);
+void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle);
+int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
+ void *addr_ptr, int mem_size);
+#endif
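
The prototypes above make up the lifecycle API that device-specific QAT drivers build on. As a rough sketch of how they fit together (hypothetical: my_qat_probe(), my_qat_driver and the error labels are illustrative and not part of this patch; configuration and adf_dev_start() are normally triggered later from user space via the control device added below):

/* Hypothetical device-specific probe; illustrative only. */
#include <linux/pci.h>
#include <linux/slab.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"

static struct pci_driver my_qat_driver;	/* the driver's own pci_driver */

static int my_qat_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct adf_accel_dev *accel_dev;
	int ret;

	accel_dev = kzalloc(sizeof(*accel_dev), GFP_KERNEL);
	if (!accel_dev)
		return -ENOMEM;
	accel_dev->accel_pci_dev.pci_dev = pdev;

	/* Make the device visible to the common framework. */
	ret = adf_devmgr_add_dev(accel_dev);
	if (ret)
		goto err_free;

	/* ... fill in accel_dev->hw_device, map BARs, set up interrupts ... */

	/* Hook up PCIe AER recovery for this function. */
	ret = adf_enable_aer(accel_dev, &my_qat_driver);
	if (ret)
		goto err_rm_dev;

	/*
	 * The device is left stopped here; user space configures it and
	 * triggers adf_dev_start() through the adf_ctl ioctl interface.
	 */
	return 0;

err_rm_dev:
	adf_devmgr_rm_dev(accel_dev);
err_free:
	kfree(accel_dev);
	return ret;
}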
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
new file mode 100644
index 000000000000..d97069b8a8e4
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -0,0 +1,490 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/bitops.h>
+#include <linux/pci.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_cfg_common.h"
+#include "adf_cfg_user.h"
+
+#define DEVICE_NAME "qat_adf_ctl"
+
+static DEFINE_MUTEX(adf_ctl_lock);
+static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
+
+static const struct file_operations adf_ctl_ops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = adf_ctl_ioctl,
+ .compat_ioctl = adf_ctl_ioctl,
+};
+
+struct adf_ctl_drv_info {
+ unsigned int major;
+ struct cdev drv_cdev;
+ struct class *drv_class;
+};
+
+static struct adf_ctl_drv_info adt_ctl_drv;
+
+static void adf_chr_drv_destroy(void)
+{
+ device_destroy(adt_ctl_drv.drv_class, MKDEV(adt_ctl_drv.major, 0));
+ cdev_del(&adt_ctl_drv.drv_cdev);
+ class_destroy(adt_ctl_drv.drv_class);
+ unregister_chrdev_region(MKDEV(adt_ctl_drv.major, 0), 1);
+}
+
+static int adf_chr_drv_create(void)
+{
+ dev_t dev_id;
+ struct device *drv_device;
+
+ if (alloc_chrdev_region(&dev_id, 0, 1, DEVICE_NAME)) {
+ pr_err("QAT: unable to allocate chrdev region\n");
+ return -EFAULT;
+ }
+
+ adt_ctl_drv.drv_class = class_create(THIS_MODULE, DEVICE_NAME);
+ if (IS_ERR(adt_ctl_drv.drv_class)) {
+ pr_err("QAT: class_create failed for adf_ctl\n");
+ goto err_chrdev_unreg;
+ }
+ adt_ctl_drv.major = MAJOR(dev_id);
+ cdev_init(&adt_ctl_drv.drv_cdev, &adf_ctl_ops);
+ if (cdev_add(&adt_ctl_drv.drv_cdev, dev_id, 1)) {
+ pr_err("QAT: cdev add failed\n");
+ goto err_class_destr;
+ }
+
+ drv_device = device_create(adt_ctl_drv.drv_class, NULL,
+ MKDEV(adt_ctl_drv.major, 0),
+ NULL, DEVICE_NAME);
+ if (IS_ERR(drv_device)) {
+ pr_err("QAT: failed to create device\n");
+ goto err_cdev_del;
+ }
+ return 0;
+err_cdev_del:
+ cdev_del(&adt_ctl_drv.drv_cdev);
+err_class_destr:
+ class_destroy(adt_ctl_drv.drv_class);
+err_chrdev_unreg:
+ unregister_chrdev_region(dev_id, 1);
+ return -EFAULT;
+}
+
+static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data,
+ unsigned long arg)
+{
+ struct adf_user_cfg_ctl_data *cfg_data;
+
+ cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL);
+ if (!cfg_data)
+ return -ENOMEM;
+
+ /* Initialize device id to NO DEVICE as 0 is a valid device id */
+ cfg_data->device_id = ADF_CFG_NO_DEVICE;
+
+ if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) {
+ pr_err("QAT: failed to copy from user cfg_data.\n");
+ kfree(cfg_data);
+ return -EIO;
+ }
+
+ *ctl_data = cfg_data;
+ return 0;
+}
+
+static int adf_add_key_value_data(struct adf_accel_dev *accel_dev,
+ const char *section,
+ const struct adf_user_cfg_key_val *key_val)
+{
+ if (key_val->type == ADF_HEX) {
+ long *ptr = (long *)key_val->val;
+ long val = *ptr;
+
+ if (adf_cfg_add_key_value_param(accel_dev, section,
+ key_val->key, (void *)val,
+ key_val->type)) {
+ pr_err("QAT: failed to add keyvalue.\n");
+ return -EFAULT;
+ }
+ } else {
+ if (adf_cfg_add_key_value_param(accel_dev, section,
+ key_val->key, key_val->val,
+ key_val->type)) {
+ pr_err("QAT: failed to add keyvalue.\n");
+ return -EFAULT;
+ }
+ }
+ return 0;
+}
+
+static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
+ struct adf_user_cfg_ctl_data *ctl_data)
+{
+ struct adf_user_cfg_key_val key_val;
+ struct adf_user_cfg_key_val *params_head;
+ struct adf_user_cfg_section section, *section_head;
+
+ section_head = ctl_data->config_section;
+
+ while (section_head) {
+ if (copy_from_user(&section, (void __user *)section_head,
+ sizeof(*section_head))) {
+ pr_err("QAT: failed to copy section info\n");
+ goto out_err;
+ }
+
+ if (adf_cfg_section_add(accel_dev, section.name)) {
+ pr_err("QAT: failed to add section.\n");
+ goto out_err;
+ }
+
+ params_head = section_head->params;
+
+ while (params_head) {
+ if (copy_from_user(&key_val, (void __user *)params_head,
+ sizeof(key_val))) {
+ pr_err("QAT: Failed to copy keyvalue.\n");
+ goto out_err;
+ }
+ if (adf_add_key_value_data(accel_dev, section.name,
+ &key_val)) {
+ goto out_err;
+ }
+ params_head = key_val.next;
+ }
+ section_head = section.next;
+ }
+ return 0;
+out_err:
+ adf_cfg_del_all(accel_dev);
+ return -EFAULT;
+}
+
+static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+ struct adf_user_cfg_ctl_data *ctl_data;
+ struct adf_accel_dev *accel_dev;
+
+ ret = adf_ctl_alloc_resources(&ctl_data, arg);
+ if (ret)
+ return ret;
+
+ accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
+ if (!accel_dev) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (adf_dev_started(accel_dev)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (adf_copy_key_value_data(accel_dev, ctl_data)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+out:
+ kfree(ctl_data);
+ return ret;
+}
+
+static int adf_ctl_is_device_in_use(int id)
+{
+ struct list_head *itr, *head = adf_devmgr_get_head();
+
+ list_for_each(itr, head) {
+ struct adf_accel_dev *dev =
+ list_entry(itr, struct adf_accel_dev, list);
+
+ if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
+ if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) {
+ pr_info("QAT: device qat_dev%d is busy\n",
+ dev->accel_id);
+ return -EBUSY;
+ }
+ }
+ }
+ return 0;
+}
+
+static int adf_ctl_stop_devices(uint32_t id)
+{
+ struct list_head *itr, *head = adf_devmgr_get_head();
+ int ret = 0;
+
+ list_for_each(itr, head) {
+ struct adf_accel_dev *accel_dev =
+ list_entry(itr, struct adf_accel_dev, list);
+ if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
+ if (!adf_dev_started(accel_dev))
+ continue;
+
+ if (adf_dev_stop(accel_dev)) {
+ pr_err("QAT: Failed to stop qat_dev%d\n", id);
+ ret = -EFAULT;
+ }
+ }
+ }
+ return ret;
+}
+
+static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+ struct adf_user_cfg_ctl_data *ctl_data;
+
+ ret = adf_ctl_alloc_resources(&ctl_data, arg);
+ if (ret)
+ return ret;
+
+ if (adf_devmgr_verify_id(ctl_data->device_id)) {
+ pr_err("QAT: Device %d not found\n", ctl_data->device_id);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = adf_ctl_is_device_in_use(ctl_data->device_id);
+ if (ret)
+ goto out;
+
+ if (ctl_data->device_id == ADF_CFG_ALL_DEVICES)
+ pr_info("QAT: Stopping all acceleration devices.\n");
+ else
+ pr_info("QAT: Stopping acceleration device qat_dev%d.\n",
+ ctl_data->device_id);
+
+ ret = adf_ctl_stop_devices(ctl_data->device_id);
+ if (ret)
+ pr_err("QAT: failed to stop device.\n");
+out:
+ kfree(ctl_data);
+ return ret;
+}
+
+static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+ struct adf_user_cfg_ctl_data *ctl_data;
+ struct adf_accel_dev *accel_dev;
+
+ ret = adf_ctl_alloc_resources(&ctl_data, arg);
+ if (ret)
+ return ret;
+
+ accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
+ if (!accel_dev) {
+ pr_err("QAT: Device %d not found\n", ctl_data->device_id);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (!adf_dev_started(accel_dev)) {
+ pr_info("QAT: Starting acceleration device qat_dev%d.\n",
+ ctl_data->device_id);
+ ret = adf_dev_start(accel_dev);
+ } else {
+ pr_info("QAT: Acceleration device qat_dev%d already started.\n",
+ ctl_data->device_id);
+ }
+ if (ret) {
+ pr_err("QAT: Failed to start qat_dev%d\n", ctl_data->device_id);
+ adf_dev_stop(accel_dev);
+ }
+out:
+ kfree(ctl_data);
+ return ret;
+}
+
+static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd,
+ unsigned long arg)
+{
+ uint32_t num_devices = 0;
+
+ adf_devmgr_get_num_dev(&num_devices);
+ if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct adf_hw_device_data *hw_data;
+ struct adf_dev_status_info dev_info;
+ struct adf_accel_dev *accel_dev;
+
+ if (copy_from_user(&dev_info, (void __user *)arg,
+ sizeof(struct adf_dev_status_info))) {
+ pr_err("QAT: failed to copy from user.\n");
+ return -EFAULT;
+ }
+
+ accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
+ if (!accel_dev) {
+ pr_err("QAT: Device %d not found\n", dev_info.accel_id);
+ return -ENODEV;
+ }
+ hw_data = accel_dev->hw_device;
+ dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
+ dev_info.num_ae = hw_data->get_num_aes(hw_data);
+ dev_info.num_accel = hw_data->get_num_accels(hw_data);
+ dev_info.num_logical_accel = hw_data->num_logical_accel;
+ dev_info.banks_per_accel = hw_data->num_banks
+ / hw_data->num_logical_accel;
+ strlcpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
+ dev_info.instance_id = hw_data->instance_id;
+ dev_info.type = hw_data->dev_class->type;
+ dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number;
+ dev_info.dev = PCI_SLOT(accel_to_pci_dev(accel_dev)->devfn);
+ dev_info.fun = PCI_FUNC(accel_to_pci_dev(accel_dev)->devfn);
+
+ if (copy_to_user((void __user *)arg, &dev_info,
+ sizeof(struct adf_dev_status_info))) {
+ pr_err("QAT: failed to copy status.\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ if (mutex_lock_interruptible(&adf_ctl_lock))
+ return -EFAULT;
+
+ switch (cmd) {
+ case IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS:
+ ret = adf_ctl_ioctl_dev_config(fp, cmd, arg);
+ break;
+
+ case IOCTL_STOP_ACCEL_DEV:
+ ret = adf_ctl_ioctl_dev_stop(fp, cmd, arg);
+ break;
+
+ case IOCTL_START_ACCEL_DEV:
+ ret = adf_ctl_ioctl_dev_start(fp, cmd, arg);
+ break;
+
+ case IOCTL_GET_NUM_DEVICES:
+ ret = adf_ctl_ioctl_get_num_devices(fp, cmd, arg);
+ break;
+
+ case IOCTL_STATUS_ACCEL_DEV:
+ ret = adf_ctl_ioctl_get_status(fp, cmd, arg);
+ break;
+ default:
+ pr_err("QAT: Invalid ioclt\n");
+ ret = -EFAULT;
+ break;
+ }
+ mutex_unlock(&adf_ctl_lock);
+ return ret;
+}
+
+static int __init adf_register_ctl_device_driver(void)
+{
+ mutex_init(&adf_ctl_lock);
+
+ if (qat_algs_init())
+ goto err_algs_init;
+
+ if (adf_chr_drv_create())
+ goto err_chr_dev;
+
+ if (adf_init_aer())
+ goto err_aer;
+
+ if (qat_crypto_register())
+ goto err_crypto_register;
+
+ return 0;
+
+err_crypto_register:
+ adf_exit_aer();
+err_aer:
+ adf_chr_drv_destroy();
+err_chr_dev:
+ qat_algs_exit();
+err_algs_init:
+ mutex_destroy(&adf_ctl_lock);
+ return -EFAULT;
+}
+
+static void __exit adf_unregister_ctl_device_driver(void)
+{
+ adf_chr_drv_destroy();
+ adf_exit_aer();
+ qat_crypto_unregister();
+ qat_algs_exit();
+ mutex_destroy(&adf_ctl_lock);
+}
+
+module_init(adf_register_ctl_device_driver);
+module_exit(adf_unregister_ctl_device_driver);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_ALIAS("intel_qat");
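
From user space the character device registered above is driven entirely through the ioctls dispatched in adf_ctl_ioctl(). A minimal, hypothetical query tool might look like the sketch below; it assumes the node shows up as /dev/qat_adf_ctl and that the IOCTL_* request codes and struct adf_dev_status_info come from the adf_cfg_common.h user-visible header, which is not shown in this excerpt:

/* Hypothetical user-space query tool; paths and headers are assumptions. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "adf_cfg_common.h"	/* IOCTL_* codes, struct adf_dev_status_info */

int main(void)
{
	struct adf_dev_status_info info;
	uint32_t num_devices, i;
	int fd = open("/dev/qat_adf_ctl", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, IOCTL_GET_NUM_DEVICES, &num_devices) < 0)
		goto err;
	for (i = 0; i < num_devices; i++) {
		info.accel_id = i;	/* id to query, read by the driver */
		if (ioctl(fd, IOCTL_STATUS_ACCEL_DEV, &info) < 0)
			continue;
		printf("qat_dev%u: %s, %u AEs\n", i,
		       info.state == DEV_UP ? "up" : "down",
		       (unsigned int)info.num_ae);
	}
	close(fd);
	return 0;
err:
	close(fd);
	return 1;
}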
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
new file mode 100644
index 000000000000..ae71555c0868
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
@@ -0,0 +1,219 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static LIST_HEAD(accel_table);
+static DEFINE_MUTEX(table_lock);
+static uint32_t num_devices;
+
+/**
+ * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function adds acceleration device to the acceleration framework.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev)
+{
+ struct list_head *itr;
+
+ if (num_devices == ADF_MAX_DEVICES) {
+ pr_err("QAT: Only support up to %d devices\n", ADF_MAX_DEVICES);
+ return -EFAULT;
+ }
+
+ mutex_lock(&table_lock);
+ list_for_each(itr, &accel_table) {
+ struct adf_accel_dev *ptr =
+ list_entry(itr, struct adf_accel_dev, list);
+
+ if (ptr == accel_dev) {
+ mutex_unlock(&table_lock);
+ return -EEXIST;
+ }
+ }
+ atomic_set(&accel_dev->ref_count, 0);
+ list_add_tail(&accel_dev->list, &accel_table);
+ accel_dev->accel_id = num_devices++;
+ mutex_unlock(&table_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);
+
+struct list_head *adf_devmgr_get_head(void)
+{
+ return &accel_table;
+}
+
+/**
+ * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function removes acceleration device from the acceleration framework.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev)
+{
+ mutex_lock(&table_lock);
+ list_del(&accel_dev->list);
+ num_devices--;
+ mutex_unlock(&table_lock);
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);
+
+struct adf_accel_dev *adf_devmgr_get_first(void)
+{
+ struct adf_accel_dev *dev = NULL;
+
+ if (!list_empty(&accel_table))
+ dev = list_first_entry(&accel_table, struct adf_accel_dev,
+ list);
+ return dev;
+}
+
+/**
+ * adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev.
+ * @pci_dev: Pointer to pci device.
+ *
+ * Function returns acceleration device associated with the given pci device.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: pointer to accel_dev or NULL if not found.
+ */
+struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
+{
+ struct list_head *itr;
+
+ mutex_lock(&table_lock);
+ list_for_each(itr, &accel_table) {
+ struct adf_accel_dev *ptr =
+ list_entry(itr, struct adf_accel_dev, list);
+
+ if (ptr->accel_pci_dev.pci_dev == pci_dev) {
+ mutex_unlock(&table_lock);
+ return ptr;
+ }
+ }
+ mutex_unlock(&table_lock);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
+
+struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id)
+{
+ struct list_head *itr;
+
+ mutex_lock(&table_lock);
+ list_for_each(itr, &accel_table) {
+ struct adf_accel_dev *ptr =
+ list_entry(itr, struct adf_accel_dev, list);
+
+ if (ptr->accel_id == id) {
+ mutex_unlock(&table_lock);
+ return ptr;
+ }
+ }
+ mutex_unlock(&table_lock);
+ return NULL;
+}
+
+int adf_devmgr_verify_id(uint32_t id)
+{
+ if (id == ADF_CFG_ALL_DEVICES)
+ return 0;
+
+ if (adf_devmgr_get_dev_by_id(id))
+ return 0;
+
+ return -ENODEV;
+}
+
+void adf_devmgr_get_num_dev(uint32_t *num)
+{
+ struct list_head *itr;
+
+ *num = 0;
+ list_for_each(itr, &accel_table) {
+ (*num)++;
+ }
+}
+
+int adf_dev_in_use(struct adf_accel_dev *accel_dev)
+{
+ return atomic_read(&accel_dev->ref_count) != 0;
+}
+
+int adf_dev_get(struct adf_accel_dev *accel_dev)
+{
+ if (atomic_add_return(1, &accel_dev->ref_count) == 1)
+ if (!try_module_get(accel_dev->owner))
+ return -EFAULT;
+ return 0;
+}
+
+void adf_dev_put(struct adf_accel_dev *accel_dev)
+{
+ if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
+ module_put(accel_dev->owner);
+}
+
+int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
+{
+ return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+}
+
+int adf_dev_started(struct adf_accel_dev *accel_dev)
+{
+ return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
+}
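
adf_dev_get() and adf_dev_put() pin the device and, on the 0->1 transition, its owning module, so a device cannot disappear while requests are outstanding. A hedged sketch of the intended caller pattern (my_submit_work() is hypothetical; real users keep the reference until the response has been processed):

/* Illustrative caller; my_submit_work() is a hypothetical submission path. */
#include "adf_accel_devices.h"
#include "adf_common_drv.h"

static int my_submit_work(struct adf_accel_dev *accel_dev);	/* hypothetical */

static int my_use_device(struct adf_accel_dev *accel_dev)
{
	int ret;

	/* Pin the device (and its owner module on the 0 -> 1 transition). */
	ret = adf_dev_get(accel_dev);
	if (ret)
		return ret;

	if (!adf_dev_started(accel_dev)) {
		ret = -EAGAIN;
		goto out_put;
	}

	ret = my_submit_work(accel_dev);
out_put:
	/* Drop the reference; module_put() happens on the 1 -> 0 transition. */
	adf_dev_put(accel_dev);
	return ret;
}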
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
new file mode 100644
index 000000000000..5c0e47a00a87
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -0,0 +1,388 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static LIST_HEAD(service_table);
+static DEFINE_MUTEX(service_lock);
+
+static void adf_service_add(struct service_hndl *service)
+{
+ mutex_lock(&service_lock);
+ list_add(&service->list, &service_table);
+ mutex_unlock(&service_lock);
+}
+
+/**
+ * adf_service_register() - Register acceleration service in the accel framework
+ * @service: Pointer to the service
+ *
+ * Function adds the acceleration service to the acceleration framework.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_service_register(struct service_hndl *service)
+{
+ service->init_status = 0;
+ service->start_status = 0;
+ adf_service_add(service);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_service_register);
+
+static void adf_service_remove(struct service_hndl *service)
+{
+ mutex_lock(&service_lock);
+ list_del(&service->list);
+ mutex_unlock(&service_lock);
+}
+
+/**
+ * adf_service_unregister() - Unregister acceleration service from the framework
+ * @service: Pointer to the service
+ *
+ * Function removes the acceleration service from the acceleration framework.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_service_unregister(struct service_hndl *service)
+{
+ if (service->init_status || service->start_status) {
+ pr_err("QAT: Could not remove active service\n");
+ return -EFAULT;
+ }
+ adf_service_remove(service);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_service_unregister);
+
+/**
+ * adf_dev_start() - Start acceleration service for the given accel device
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function notifies all the registered services that the acceleration device
+ * is ready to be used.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_dev_start(struct adf_accel_dev *accel_dev)
+{
+ struct service_hndl *service;
+ struct list_head *list_itr;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+
+ if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status)) {
+ pr_info("QAT: Device not configured\n");
+ return -EFAULT;
+ }
+ set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+
+ if (adf_ae_init(accel_dev)) {
+ pr_err("QAT: Failed to initialise Acceleration Engine\n");
+ return -EFAULT;
+ }
+ set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);
+
+ if (adf_ae_fw_load(accel_dev)) {
+ pr_err("QAT: Failed to load acceleration FW\n");
+ adf_ae_fw_release(accel_dev);
+ return -EFAULT;
+ }
+ set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
+
+ if (hw_data->alloc_irq(accel_dev)) {
+ pr_err("QAT: Failed to allocate interrupts\n");
+ return -EFAULT;
+ }
+ set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
+
+ /*
+ * Subservice initialisation is divided into two stages: init and start.
+ * This is to facilitate any ordering dependencies between services
+ * prior to starting any of the accelerators.
+ */
+ list_for_each(list_itr, &service_table) {
+ service = list_entry(list_itr, struct service_hndl, list);
+ if (!service->admin)
+ continue;
+ if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
+ pr_err("QAT: Failed to initialise service %s\n",
+ service->name);
+ return -EFAULT;
+ }
+ set_bit(accel_dev->accel_id, &service->init_status);
+ }
+ list_for_each(list_itr, &service_table) {
+ service = list_entry(list_itr, struct service_hndl, list);
+ if (service->admin)
+ continue;
+ if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
+ pr_err("QAT: Failed to initialise service %s\n",
+ service->name);
+ return -EFAULT;
+ }
+ set_bit(accel_dev->accel_id, &service->init_status);
+ }
+
+ hw_data->enable_error_correction(accel_dev);
+
+ if (adf_ae_start(accel_dev)) {
+ pr_err("QAT: AE Start Failed\n");
+ return -EFAULT;
+ }
+ set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
+
+ list_for_each(list_itr, &service_table) {
+ service = list_entry(list_itr, struct service_hndl, list);
+ if (!service->admin)
+ continue;
+ if (service->event_hld(accel_dev, ADF_EVENT_START)) {
+ pr_err("QAT: Failed to start service %s\n",
+ service->name);
+ return -EFAULT;
+ }
+ set_bit(accel_dev->accel_id, &service->start_status);
+ }
+ list_for_each(list_itr, &service_table) {
+ service = list_entry(list_itr, struct service_hndl, list);
+ if (service->admin)
+ continue;
+ if (service->event_hld(accel_dev, ADF_EVENT_START)) {
+ pr_err("QAT: Failed to start service %s\n",
+ service->name);
+ return -EFAULT;
+ }
+ set_bit(accel_dev->accel_id, &service->start_status);
+ }
+
+ clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
+ set_bit(ADF_STATUS_STARTED, &accel_dev->status);
+
+ if (qat_algs_register()) {
+ pr_err("QAT: Failed to register crypto algs\n");
+ set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+ clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+ return -EFAULT;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_start);
+
+/**
+ * adf_dev_stop() - Stop acceleration service for the given accel device
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function notifies all the registered services that the acceleration device
+ * is shutting down.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_dev_stop(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct service_hndl *service;
+ struct list_head *list_itr;
+ int ret, wait = 0;
+
+ if (!adf_dev_started(accel_dev) &&
+ !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) {
+ return 0;
+ }
+ clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+ clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
+ clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+
+ if (qat_algs_unregister())
+ pr_err("QAT: Failed to unregister crypto algs\n");
+
+ list_for_each(list_itr, &service_table) {
+ service = list_entry(list_itr, struct service_hndl, list);
+ if (service->admin)
+ continue;
+ if (!test_bit(accel_dev->accel_id, &service->start_status))
+ continue;
+ ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
+ if (!ret) {
+ clear_bit(accel_dev->accel_id, &service->start_status);
+ } else if (ret == -EAGAIN) {
+ wait = 1;
+ clear_bit(accel_dev->accel_id, &service->start_status);
+ }
+ }
+ list_for_each(list_itr, &service_table) {
+ service = list_entry(list_itr, struct service_hndl, list);
+ if (!service->admin)
+ continue;
+ if (!test_bit(accel_dev->accel_id, &service->start_status))
+ continue;
+ if (service->event_hld(accel_dev, ADF_EVENT_STOP))
+ pr_err("QAT: Failed to shutdown service %s\n",
+ service->name);
+ else
+ clear_bit(accel_dev->accel_id, &service->start_status);
+ }
+
+ if (wait)
+ msleep(100);
+
+ if (adf_dev_started(accel_dev)) {
+ if (adf_ae_stop(accel_dev))
+ pr_err("QAT: failed to stop AE\n");
+ else
+ clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
+ }
+
+ if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
+ if (adf_ae_fw_release(accel_dev))
+ pr_err("QAT: Failed to release the ucode\n");
+ else
+ clear_bit(ADF_STATUS_AE_UCODE_LOADED,
+ &accel_dev->status);
+ }
+
+ if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) {
+ if (adf_ae_shutdown(accel_dev))
+ pr_err("QAT: Failed to shutdown Accel Engine\n");
+ else
+ clear_bit(ADF_STATUS_AE_INITIALISED,
+ &accel_dev->status);
+ }
+
+ list_for_each(list_itr, &service_table) {
+ service = list_entry(list_itr, struct service_hndl, list);
+ if (service->admin)
+ continue;
+ if (!test_bit(accel_dev->accel_id, &service->init_status))
+ continue;
+ if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
+ pr_err("QAT: Failed to shutdown service %s\n",
+ service->name);
+ else
+ clear_bit(accel_dev->accel_id, &service->init_status);
+ }
+ list_for_each(list_itr, &service_table) {
+ service = list_entry(list_itr, struct service_hndl, list);
+ if (!service->admin)
+ continue;
+ if (!test_bit(accel_dev->accel_id, &service->init_status))
+ continue;
+ if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
+ pr_err("QAT: Failed to shutdown service %s\n",
+ service->name);
+ else
+ clear_bit(accel_dev->accel_id, &service->init_status);
+ }
+
+ if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
+ hw_data->free_irq(accel_dev);
+ clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
+ }
+
+ /* Delete configuration only if not restarting */
+ if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
+ adf_cfg_del_all(accel_dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_stop);
+
+int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
+{
+ struct service_hndl *service;
+ struct list_head *list_itr;
+
+ list_for_each(list_itr, &service_table) {
+ service = list_entry(list_itr, struct service_hndl, list);
+ if (service->admin)
+ continue;
+ if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
+ pr_err("QAT: Failed to restart service %s.\n",
+ service->name);
+ }
+ list_for_each(list_itr, &service_table) {
+ service = list_entry(list_itr, struct service_hndl, list);
+ if (!service->admin)
+ continue;
+ if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
+ pr_err("QAT: Failed to restart service %s.\n",
+ service->name);
+ }
+ return 0;
+}
+
+int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
+{
+ struct service_hndl *service;
+ struct list_head *list_itr;
+
+ list_for_each(list_itr, &service_table) {
+ service = list_entry(list_itr, struct service_hndl, list);
+ if (service->admin)
+ continue;
+ if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
+ pr_err("QAT: Failed to restart service %s.\n",
+ service->name);
+ }
+ list_for_each(list_itr, &service_table) {
+ service = list_entry(list_itr, struct service_hndl, list);
+ if (!service->admin)
+ continue;
+ if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
+ pr_err("QAT: Failed to restart service %s.\n",
+ service->name);
+ }
+ return 0;
+}
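
Subsystems hook into this init/start/stop/shutdown sequencing by registering a struct service_hndl whose event_hld callback reacts to the adf_event notifications; the framework itself tracks per-device progress in init_status/start_status. A minimal, hypothetical service registration (all my_* names are illustrative, not part of this patch):

/* Illustrative service; my_service_* names are hypothetical. */
#include <linux/module.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"

static int my_service_event_handler(struct adf_accel_dev *accel_dev,
				    enum adf_event event)
{
	switch (event) {
	case ADF_EVENT_INIT:
		/* allocate per-device resources */
		break;
	case ADF_EVENT_START:
		/* device is up, start using it */
		break;
	case ADF_EVENT_STOP:
		/* quiesce; returning -EAGAIN asks adf_dev_stop() to wait */
		break;
	case ADF_EVENT_SHUTDOWN:
		/* free per-device resources */
		break;
	default:
		break;
	}
	return 0;
}

static struct service_hndl my_service = {
	.event_hld = my_service_event_handler,
	.name = "my_service",
	.admin = 0,
};

static int __init my_service_init(void)
{
	/* init_status/start_status bits are managed by adf_dev_start/stop(). */
	return adf_service_register(&my_service);
}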
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
new file mode 100644
index 000000000000..5f3fa45348b4
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -0,0 +1,567 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_transport_internal.h"
+#include "adf_transport_access_macros.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
+{
+ uint32_t div = data >> shift;
+ uint32_t mult = div << shift;
+
+ return data - mult;
+}
+
+static inline int adf_check_ring_alignment(uint64_t addr, uint64_t size)
+{
+ if (((size - 1) & addr) != 0)
+ return -EFAULT;
+ return 0;
+}
+
+static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num)
+{
+ int i = ADF_MIN_RING_SIZE;
+
+ for (; i <= ADF_MAX_RING_SIZE; i++)
+ if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
+ return i;
+
+ return ADF_DEFAULT_RING_SIZE;
+}
+
+static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
+{
+ spin_lock(&bank->lock);
+ if (bank->ring_mask & (1 << ring)) {
+ spin_unlock(&bank->lock);
+ return -EFAULT;
+ }
+ bank->ring_mask |= (1 << ring);
+ spin_unlock(&bank->lock);
+ return 0;
+}
+
+static void adf_unreserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
+{
+ spin_lock(&bank->lock);
+ bank->ring_mask &= ~(1 << ring);
+ spin_unlock(&bank->lock);
+}
+
+static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
+{
+ spin_lock_bh(&bank->lock);
+ bank->irq_mask |= (1 << ring);
+ spin_unlock_bh(&bank->lock);
+ WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
+ WRITE_CSR_INT_COL_CTL(bank->csr_addr, bank->bank_number,
+ bank->irq_coalesc_timer);
+}
+
+static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
+{
+ spin_lock_bh(&bank->lock);
+ bank->irq_mask &= ~(1 << ring);
+ spin_unlock_bh(&bank->lock);
+ WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
+}
+
+int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg)
+{
+ if (atomic_add_return(1, ring->inflights) >
+ ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
+ atomic_dec(ring->inflights);
+ return -EAGAIN;
+ }
+ spin_lock_bh(&ring->lock);
+ memcpy(ring->base_addr + ring->tail, msg,
+ ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
+
+ ring->tail = adf_modulo(ring->tail +
+ ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
+ ADF_RING_SIZE_MODULO(ring->ring_size));
+ WRITE_CSR_RING_TAIL(ring->bank->csr_addr, ring->bank->bank_number,
+ ring->ring_number, ring->tail);
+ spin_unlock_bh(&ring->lock);
+ return 0;
+}
+
+static int adf_handle_response(struct adf_etr_ring_data *ring)
+{
+ uint32_t msg_counter = 0;
+ uint32_t *msg = (uint32_t *)(ring->base_addr + ring->head);
+
+ while (*msg != ADF_RING_EMPTY_SIG) {
+ ring->callback((uint32_t *)msg);
+ *msg = ADF_RING_EMPTY_SIG;
+ ring->head = adf_modulo(ring->head +
+ ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
+ ADF_RING_SIZE_MODULO(ring->ring_size));
+ msg_counter++;
+ msg = (uint32_t *)(ring->base_addr + ring->head);
+ }
+ if (msg_counter > 0) {
+ WRITE_CSR_RING_HEAD(ring->bank->csr_addr,
+ ring->bank->bank_number,
+ ring->ring_number, ring->head);
+ atomic_sub(msg_counter, ring->inflights);
+ }
+ return 0;
+}
+
+static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
+{
+ uint32_t ring_config = BUILD_RING_CONFIG(ring->ring_size);
+
+ WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
+ ring->ring_number, ring_config);
+}
+
+static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
+{
+ uint32_t ring_config =
+ BUILD_RESP_RING_CONFIG(ring->ring_size,
+ ADF_RING_NEAR_WATERMARK_512,
+ ADF_RING_NEAR_WATERMARK_0);
+
+ WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
+ ring->ring_number, ring_config);
+}
+
+static int adf_init_ring(struct adf_etr_ring_data *ring)
+{
+ struct adf_etr_bank_data *bank = ring->bank;
+ struct adf_accel_dev *accel_dev = bank->accel_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ uint64_t ring_base;
+ uint32_t ring_size_bytes =
+ ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
+
+ ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
+ ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
+ ring_size_bytes, &ring->dma_addr,
+ GFP_KERNEL);
+ if (!ring->base_addr)
+ return -ENOMEM;
+
+ memset(ring->base_addr, 0x7F, ring_size_bytes);
+ /* The base_addr has to be aligned to the size of the buffer */
+ if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
+ pr_err("QAT: Ring address not aligned\n");
+ dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
+ ring->base_addr, ring->dma_addr);
+ return -EFAULT;
+ }
+
+ if (hw_data->tx_rings_mask & (1 << ring->ring_number))
+ adf_configure_tx_ring(ring);
+
+ else
+ adf_configure_rx_ring(ring);
+
+ ring_base = BUILD_RING_BASE_ADDR(ring->dma_addr, ring->ring_size);
+ WRITE_CSR_RING_BASE(ring->bank->csr_addr, ring->bank->bank_number,
+ ring->ring_number, ring_base);
+ spin_lock_init(&ring->lock);
+ return 0;
+}
+
+static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
+{
+ uint32_t ring_size_bytes =
+ ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
+ ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
+
+ if (ring->base_addr) {
+ memset(ring->base_addr, 0x7F, ring_size_bytes);
+ dma_free_coherent(&GET_DEV(ring->bank->accel_dev),
+ ring_size_bytes, ring->base_addr,
+ ring->dma_addr);
+ }
+}
+
+int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
+ uint32_t bank_num, uint32_t num_msgs,
+ uint32_t msg_size, const char *ring_name,
+ adf_callback_fn callback, int poll_mode,
+ struct adf_etr_ring_data **ring_ptr)
+{
+ struct adf_etr_data *transport_data = accel_dev->transport;
+ struct adf_etr_bank_data *bank;
+ struct adf_etr_ring_data *ring;
+ char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+ uint32_t ring_num;
+ int ret;
+
+ if (bank_num >= GET_MAX_BANKS(accel_dev)) {
+ pr_err("QAT: Invalid bank number\n");
+ return -EFAULT;
+ }
+ if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
+ pr_err("QAT: Invalid msg size\n");
+ return -EFAULT;
+ }
+ if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
+ ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
+ pr_err("QAT: Invalid ring size for given msg size\n");
+ return -EFAULT;
+ }
+ if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
+ pr_err("QAT: Section %s, no such entry : %s\n",
+ section, ring_name);
+ return -EFAULT;
+ }
+ if (kstrtouint(val, 10, &ring_num)) {
+ pr_err("QAT: Can't get ring number\n");
+ return -EFAULT;
+ }
+
+ bank = &transport_data->banks[bank_num];
+ if (adf_reserve_ring(bank, ring_num)) {
+ pr_err("QAT: Ring %d, %s already exists.\n",
+ ring_num, ring_name);
+ return -EFAULT;
+ }
+ ring = &bank->rings[ring_num];
+ ring->ring_number = ring_num;
+ ring->bank = bank;
+ ring->callback = callback;
+ ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
+ ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
+ ring->head = 0;
+ ring->tail = 0;
+ atomic_set(ring->inflights, 0);
+ ret = adf_init_ring(ring);
+ if (ret)
+ goto err;
+
+ /* Enable HW arbitration for the given ring */
+ accel_dev->hw_device->hw_arb_ring_enable(ring);
+
+ if (adf_ring_debugfs_add(ring, ring_name)) {
+ pr_err("QAT: Couldn't add ring debugfs entry\n");
+ ret = -EFAULT;
+ goto err;
+ }
+
+ /* Enable interrupts if needed */
+ if (callback && (!poll_mode))
+ adf_enable_ring_irq(bank, ring->ring_number);
+ *ring_ptr = ring;
+ return 0;
+err:
+ adf_cleanup_ring(ring);
+ adf_unreserve_ring(bank, ring_num);
+ accel_dev->hw_device->hw_arb_ring_disable(ring);
+ return ret;
+}
+
+void adf_remove_ring(struct adf_etr_ring_data *ring)
+{
+ struct adf_etr_bank_data *bank = ring->bank;
+ struct adf_accel_dev *accel_dev = bank->accel_dev;
+
+ /* Disable interrupts for the given ring */
+ adf_disable_ring_irq(bank, ring->ring_number);
+
+ /* Clear the ring configuration CSRs */
+ WRITE_CSR_RING_CONFIG(bank->csr_addr, bank->bank_number,
+ ring->ring_number, 0);
+ WRITE_CSR_RING_BASE(bank->csr_addr, bank->bank_number,
+ ring->ring_number, 0);
+ adf_ring_debugfs_rm(ring);
+ adf_unreserve_ring(bank, ring->ring_number);
+ /* Disable HW arbitration for the given ring */
+ accel_dev->hw_device->hw_arb_ring_disable(ring);
+ adf_cleanup_ring(ring);
+}
+
+static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
+{
+ uint32_t empty_rings, i;
+
+ empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number);
+ empty_rings = ~empty_rings & bank->irq_mask;
+
+ for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; ++i) {
+ if (empty_rings & (1 << i))
+ adf_handle_response(&bank->rings[i]);
+ }
+}
+
+/**
+ * adf_response_handler() - Bottom half response handler
+ * @bank_addr: Address of the ring bank for which the BH was scheduled.
+ *
+ * Function is the bottom half handler for responses from the acceleration
+ * device. There is one handler for every ring bank. Function checks all
+ * communication rings in the bank.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_response_handler(unsigned long bank_addr)
+{
+ struct adf_etr_bank_data *bank = (void *)bank_addr;
+
+ /* Handle all the responses and re-enable IRQs */
+ adf_ring_response_handler(bank);
+ WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
+ bank->irq_mask);
+}
+EXPORT_SYMBOL_GPL(adf_response_handler);
+
+static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
+ const char *section, const char *format,
+ uint32_t key, uint32_t *value)
+{
+ char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+
+ snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);
+
+ if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
+ return -EFAULT;
+
+ if (kstrtouint(val_buf, 10, value))
+ return -EFAULT;
+ return 0;
+}
+
+static void adf_enable_coalesc(struct adf_etr_bank_data *bank,
+ const char *section, uint32_t bank_num_in_accel)
+{
+ if (adf_get_cfg_int(bank->accel_dev, section,
+ ADF_ETRMGR_COALESCE_TIMER_FORMAT,
+ bank_num_in_accel, &bank->irq_coalesc_timer))
+ bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
+
+ if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer ||
+ ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer)
+ bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
+}
+
+static int adf_init_bank(struct adf_accel_dev *accel_dev,
+ struct adf_etr_bank_data *bank,
+ uint32_t bank_num, void __iomem *csr_addr)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_etr_ring_data *ring;
+ struct adf_etr_ring_data *tx_ring;
+ uint32_t i, coalesc_enabled;
+
+ memset(bank, 0, sizeof(*bank));
+ bank->bank_number = bank_num;
+ bank->csr_addr = csr_addr;
+ bank->accel_dev = accel_dev;
+ spin_lock_init(&bank->lock);
+
+ /* Always enable IRQ coalescing. This allows use of the optimised
+ * flag and coalescing register.
+ * If coalescing is disabled in the config file just use the minimum
+ * time value. */
+ if (adf_get_cfg_int(accel_dev, "Accelerator0",
+ ADF_ETRMGR_COALESCING_ENABLED_FORMAT,
+ bank_num, &coalesc_enabled) && coalesc_enabled)
+ adf_enable_coalesc(bank, "Accelerator0", bank_num);
+ else
+ bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;
+
+ for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
+ WRITE_CSR_RING_CONFIG(csr_addr, bank_num, i, 0);
+ WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
+ ring = &bank->rings[i];
+ if (hw_data->tx_rings_mask & (1 << i)) {
+ ring->inflights = kzalloc_node(sizeof(atomic_t),
+ GFP_KERNEL,
+ accel_dev->numa_node);
+ if (!ring->inflights)
+ goto err;
+ } else {
+ if (i < hw_data->tx_rx_gap) {
+ pr_err("QAT: Invalid tx rings mask config\n");
+ goto err;
+ }
+ tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
+ ring->inflights = tx_ring->inflights;
+ }
+ }
+ if (adf_bank_debugfs_add(bank)) {
+ pr_err("QAT: Failed to add bank debugfs entry\n");
+ goto err;
+ }
+
+ WRITE_CSR_INT_SRCSEL(csr_addr, bank_num);
+ return 0;
+err:
+ for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
+ ring = &bank->rings[i];
+ if (hw_data->tx_rings_mask & (1 << i) && ring->inflights)
+ kfree(ring->inflights);
+ }
+ return -ENOMEM;
+}
+
+/**
+ * adf_init_etr_data() - Initialize transport rings for acceleration device
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function initializes the communication channels (rings) for the
+ * acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_init_etr_data(struct adf_accel_dev *accel_dev)
+{
+ struct adf_etr_data *etr_data;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ void __iomem *csr_addr;
+ uint32_t size;
+ uint32_t num_banks = 0;
+ int i, ret;
+
+ etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
+ accel_dev->numa_node);
+ if (!etr_data)
+ return -ENOMEM;
+
+ num_banks = GET_MAX_BANKS(accel_dev);
+ size = num_banks * sizeof(struct adf_etr_bank_data);
+ etr_data->banks = kzalloc_node(size, GFP_KERNEL, accel_dev->numa_node);
+ if (!etr_data->banks) {
+ ret = -ENOMEM;
+ goto err_bank;
+ }
+
+ accel_dev->transport = etr_data;
+ i = hw_data->get_etr_bar_id(hw_data);
+ csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;
+
+ /* accel_dev->debugfs_dir should always be non-NULL here */
+ etr_data->debug = debugfs_create_dir("transport",
+ accel_dev->debugfs_dir);
+ if (!etr_data->debug) {
+ pr_err("QAT: Unable to create transport debugfs entry\n");
+ ret = -ENOENT;
+ goto err_bank_debug;
+ }
+
+ for (i = 0; i < num_banks; i++) {
+ ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
+ csr_addr);
+ if (ret)
+ goto err_bank_all;
+ }
+
+ return 0;
+
+err_bank_all:
+ debugfs_remove(etr_data->debug);
+err_bank_debug:
+ kfree(etr_data->banks);
+err_bank:
+ kfree(etr_data);
+ accel_dev->transport = NULL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_init_etr_data);
+
+static void cleanup_bank(struct adf_etr_bank_data *bank)
+{
+ uint32_t i;
+
+ for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
+ struct adf_accel_dev *accel_dev = bank->accel_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_etr_ring_data *ring = &bank->rings[i];
+
+ if (bank->ring_mask & (1 << i))
+ adf_cleanup_ring(ring);
+
+ if (hw_data->tx_rings_mask & (1 << i))
+ kfree(ring->inflights);
+ }
+ adf_bank_debugfs_rm(bank);
+ memset(bank, 0, sizeof(*bank));
+}
+
+static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
+{
+ struct adf_etr_data *etr_data = accel_dev->transport;
+ uint32_t i, num_banks = GET_MAX_BANKS(accel_dev);
+
+ for (i = 0; i < num_banks; i++)
+ cleanup_bank(&etr_data->banks[i]);
+}
+
+/**
+ * adf_cleanup_etr_data() - Clear transport rings for acceleration device
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function clears the communication channels (rings) of the
+ * acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
+{
+ struct adf_etr_data *etr_data = accel_dev->transport;
+
+ if (etr_data) {
+ adf_cleanup_etr_handles(accel_dev);
+ debugfs_remove(etr_data->debug);
+ kfree(etr_data->banks);
+ kfree(etr_data);
+ accel_dev->transport = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(adf_cleanup_etr_data);
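
A service sitting on top of this transport layer obtains its ring numbers from the device configuration (adf_create_ring() looks the ring name up in the given section) and then exchanges fixed-size messages over a TX/RX ring pair. A hedged sketch with made-up section/ring names and my_* identifiers, assuming 64-byte messages on bank 0:

/* Illustrative only: section/ring names and my_* identifiers are made up. */
#include <linux/types.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"

static void my_resp_callback(void *resp)
{
	/* parse the response message written back by the device */
}

static int my_create_rings(struct adf_accel_dev *accel_dev,
			   struct adf_etr_ring_data **tx_ring,
			   struct adf_etr_ring_data **rx_ring)
{
	int ret;

	/* 512 requests of 64 bytes each; TX rings take no callback. */
	ret = adf_create_ring(accel_dev, "MY_SECTION", 0, 512, 64,
			      "MyRingTx", NULL, 0, tx_ring);
	if (ret)
		return ret;

	/* Response ring: pass a callback and poll_mode=0 for IRQ delivery. */
	ret = adf_create_ring(accel_dev, "MY_SECTION", 0, 512, 64,
			      "MyRingRx", my_resp_callback, 0, rx_ring);
	if (ret)
		adf_remove_ring(*tx_ring);
	return ret;
}

static int my_send(struct adf_etr_ring_data *tx_ring, uint32_t *msg)
{
	/* adf_send_message() returns -EAGAIN when the ring is full. */
	return adf_send_message(tx_ring, msg);
}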
diff --git a/drivers/crypto/qat/qat_common/adf_transport.h b/drivers/crypto/qat/qat_common/adf_transport.h
new file mode 100644
index 000000000000..386485bd9c95
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_transport.h
@@ -0,0 +1,63 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_TRANSPORT_H
+#define ADF_TRANSPORT_H
+
+#include "adf_accel_devices.h"
+
+struct adf_etr_ring_data;
+
+typedef void (*adf_callback_fn)(void *resp_msg);
+
+int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
+ uint32_t bank_num, uint32_t num_msgs, uint32_t msg_size,
+ const char *ring_name, adf_callback_fn callback,
+ int poll_mode, struct adf_etr_ring_data **ring_ptr);
+
+int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg);
+void adf_remove_ring(struct adf_etr_ring_data *ring);
+#endif
diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
new file mode 100644
index 000000000000..91d88d676580
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
@@ -0,0 +1,160 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
+#define ADF_TRANSPORT_ACCESS_MACROS_H
+
+#include "adf_accel_devices.h"
+#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
+#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
+#define ADF_RING_CSR_RING_CONFIG 0x000
+#define ADF_RING_CSR_RING_LBASE 0x040
+#define ADF_RING_CSR_RING_UBASE 0x080
+#define ADF_RING_CSR_RING_HEAD 0x0C0
+#define ADF_RING_CSR_RING_TAIL 0x100
+#define ADF_RING_CSR_E_STAT 0x14C
+#define ADF_RING_CSR_INT_SRCSEL 0x174
+#define ADF_RING_CSR_INT_SRCSEL_2 0x178
+#define ADF_RING_CSR_INT_COL_EN 0x17C
+#define ADF_RING_CSR_INT_COL_CTL 0x180
+#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
+#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
+#define ADF_RING_BUNDLE_SIZE 0x1000
+#define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A
+#define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05
+#define ADF_COALESCING_MIN_TIME 0x1FF
+#define ADF_COALESCING_MAX_TIME 0xFFFFF
+#define ADF_COALESCING_DEF_TIME 0x27FF
+#define ADF_RING_NEAR_WATERMARK_512 0x08
+#define ADF_RING_NEAR_WATERMARK_0 0x00
+#define ADF_RING_EMPTY_SIG 0x7F7F7F7F
+
+/* Valid internal ring size values */
+#define ADF_RING_SIZE_128 0x01
+#define ADF_RING_SIZE_256 0x02
+#define ADF_RING_SIZE_512 0x03
+#define ADF_RING_SIZE_4K 0x06
+#define ADF_RING_SIZE_16K 0x08
+#define ADF_RING_SIZE_4M 0x10
+#define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
+#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
+#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
+
+/* Valid internal msg size values */
+#define ADF_MSG_SIZE_32 0x01
+#define ADF_MSG_SIZE_64 0x02
+#define ADF_MSG_SIZE_128 0x04
+#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
+#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128
+
+/* Size to bytes conversion macros for ring and msg values */
+#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
+#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
+#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
+#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
+
+/* Minimum ring buffer size for memory allocation */
+#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
+ ADF_RING_SIZE_4K : SIZE)
+#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
+#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
+ ((((1 << (RING_SIZE - 1)) << 4) >> MSG_SIZE) - 1)
+#define BUILD_RING_CONFIG(size) \
+ ((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \
+ | (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+ | size)
+#define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \
+ ((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM) \
+ | (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+ | size)
+#define BUILD_RING_BASE_ADDR(addr, size) \
+ ((addr >> 6) & (0xFFFFFFFFFFFFFFFFULL << size))
+#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_HEAD + (ring << 2))
+#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_TAIL + (ring << 2))
+#define READ_CSR_E_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_E_STAT)
+#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_CONFIG + (ring << 2), value)
+#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
+do { \
+ uint32_t l_base = 0, u_base = 0; \
+ l_base = (uint32_t)(value & 0xFFFFFFFF); \
+ u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_LBASE + (ring << 2), l_base); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_UBASE + (ring << 2), u_base); \
+} while (0)
+#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_HEAD + (ring << 2), value)
+#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_TAIL + (ring << 2), value)
+#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
+do { \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
+} while (0)
+#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_COL_EN, value)
+#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_COL_CTL, \
+ ADF_RING_CSR_INT_COL_CTL_ENABLE | value)
+#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_FLAG_AND_COL, value)
+#endif
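The ring and message size constants above are shift encodings rather than byte counts; a small stand-alone check of the conversion macros (user-space C, not driver code):

#include <assert.h>

#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)

int main(void)
{
	/* ADF_MSG_SIZE_64 == 0x02: 2 << 5 == 64 bytes per message */
	assert(ADF_MSG_SIZE_TO_BYTES(0x02) == 64);
	/* ADF_RING_SIZE_16K == 0x08: (1 << 7) << 7 == 16384 ring bytes */
	assert(ADF_SIZE_TO_RING_SIZE_IN_BYTES(0x08) == 16384);
	return 0;
}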
diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c
new file mode 100644
index 000000000000..6b6974553514
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c
@@ -0,0 +1,304 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include "adf_accel_devices.h"
+#include "adf_transport_internal.h"
+#include "adf_transport_access_macros.h"
+
+static DEFINE_MUTEX(ring_read_lock);
+static DEFINE_MUTEX(bank_read_lock);
+
+static void *adf_ring_start(struct seq_file *sfile, loff_t *pos)
+{
+ struct adf_etr_ring_data *ring = sfile->private;
+
+ mutex_lock(&ring_read_lock);
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
+
+ if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
+ ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+ return NULL;
+
+ return ring->base_addr +
+ (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
+}
+
+static void *adf_ring_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+ struct adf_etr_ring_data *ring = sfile->private;
+
+ if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
+ ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+ return NULL;
+
+ return ring->base_addr +
+ (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
+}
+
+static int adf_ring_show(struct seq_file *sfile, void *v)
+{
+ struct adf_etr_ring_data *ring = sfile->private;
+ struct adf_etr_bank_data *bank = ring->bank;
+ uint32_t *msg = v;
+ void __iomem *csr = ring->bank->csr_addr;
+ int i, x;
+
+ if (v == SEQ_START_TOKEN) {
+ int head, tail, empty;
+
+ head = READ_CSR_RING_HEAD(csr, bank->bank_number,
+ ring->ring_number);
+ tail = READ_CSR_RING_TAIL(csr, bank->bank_number,
+ ring->ring_number);
+ empty = READ_CSR_E_STAT(csr, bank->bank_number);
+
+ seq_puts(sfile, "------- Ring configuration -------\n");
+ seq_printf(sfile, "ring num %d, bank num %d\n",
+ ring->ring_number, ring->bank->bank_number);
+ seq_printf(sfile, "head %x, tail %x, empty: %d\n",
+ head, tail, (empty & 1 << ring->ring_number)
+ >> ring->ring_number);
+ seq_printf(sfile, "ring size %d, msg size %d\n",
+ ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size),
+ ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
+ seq_puts(sfile, "----------- Ring data ------------\n");
+ return 0;
+ }
+ seq_printf(sfile, "%p:", msg);
+ x = 0;
+ i = 0;
+ for (; i < (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) >> 2); i++) {
+ seq_printf(sfile, " %08X", *(msg + i));
+ if ((ADF_MSG_SIZE_TO_BYTES(ring->msg_size) >> 2) != i + 1 &&
+ (++x == 8)) {
+ seq_printf(sfile, "\n%p:", msg + i + 1);
+ x = 0;
+ }
+ }
+ seq_puts(sfile, "\n");
+ return 0;
+}
+
+static void adf_ring_stop(struct seq_file *sfile, void *v)
+{
+ mutex_unlock(&ring_read_lock);
+}
+
+static const struct seq_operations adf_ring_sops = {
+ .start = adf_ring_start,
+ .next = adf_ring_next,
+ .stop = adf_ring_stop,
+ .show = adf_ring_show
+};
+
+static int adf_ring_open(struct inode *inode, struct file *file)
+{
+ int ret = seq_open(file, &adf_ring_sops);
+
+ if (!ret) {
+ struct seq_file *seq_f = file->private_data;
+
+ seq_f->private = inode->i_private;
+ }
+ return ret;
+}
+
+static const struct file_operations adf_ring_debug_fops = {
+ .open = adf_ring_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release
+};
+
+int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
+{
+ struct adf_etr_ring_debug_entry *ring_debug;
+ char entry_name[8];
+
+ ring_debug = kzalloc(sizeof(*ring_debug), GFP_KERNEL);
+ if (!ring_debug)
+ return -ENOMEM;
+
+ strlcpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name));
+ snprintf(entry_name, sizeof(entry_name), "ring_%02d",
+ ring->ring_number);
+
+ ring_debug->debug = debugfs_create_file(entry_name, S_IRUSR,
+ ring->bank->bank_debug_dir,
+ ring, &adf_ring_debug_fops);
+ if (!ring_debug->debug) {
+ pr_err("QAT: Failed to create ring debug entry.\n");
+ kfree(ring_debug);
+ return -EFAULT;
+ }
+ ring->ring_debug = ring_debug;
+ return 0;
+}
+
+void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring)
+{
+ if (ring->ring_debug) {
+ debugfs_remove(ring->ring_debug->debug);
+ kfree(ring->ring_debug);
+ ring->ring_debug = NULL;
+ }
+}
+
+static void *adf_bank_start(struct seq_file *sfile, loff_t *pos)
+{
+ mutex_lock(&bank_read_lock);
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
+
+ if (*pos >= ADF_ETR_MAX_RINGS_PER_BANK)
+ return NULL;
+
+ return pos;
+}
+
+static void *adf_bank_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+ if (++(*pos) >= ADF_ETR_MAX_RINGS_PER_BANK)
+ return NULL;
+
+ return pos;
+}
+
+static int adf_bank_show(struct seq_file *sfile, void *v)
+{
+ struct adf_etr_bank_data *bank = sfile->private;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_printf(sfile, "------- Bank %d configuration -------\n",
+ bank->bank_number);
+ } else {
+ int ring_id = *((int *)v) - 1;
+ struct adf_etr_ring_data *ring = &bank->rings[ring_id];
+ void __iomem *csr = bank->csr_addr;
+ int head, tail, empty;
+
+ if (!(bank->ring_mask & 1 << ring_id))
+ return 0;
+
+ head = READ_CSR_RING_HEAD(csr, bank->bank_number,
+ ring->ring_number);
+ tail = READ_CSR_RING_TAIL(csr, bank->bank_number,
+ ring->ring_number);
+ empty = READ_CSR_E_STAT(csr, bank->bank_number);
+
+ seq_printf(sfile,
+ "ring num %02d, head %04x, tail %04x, empty: %d\n",
+ ring->ring_number, head, tail,
+ (empty & 1 << ring->ring_number) >>
+ ring->ring_number);
+ }
+ return 0;
+}
+
+static void adf_bank_stop(struct seq_file *sfile, void *v)
+{
+ mutex_unlock(&bank_read_lock);
+}
+
+static const struct seq_operations adf_bank_sops = {
+ .start = adf_bank_start,
+ .next = adf_bank_next,
+ .stop = adf_bank_stop,
+ .show = adf_bank_show
+};
+
+static int adf_bank_open(struct inode *inode, struct file *file)
+{
+ int ret = seq_open(file, &adf_bank_sops);
+
+ if (!ret) {
+ struct seq_file *seq_f = file->private_data;
+
+ seq_f->private = inode->i_private;
+ }
+ return ret;
+}
+
+static const struct file_operations adf_bank_debug_fops = {
+ .open = adf_bank_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release
+};
+
+int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
+{
+ struct adf_accel_dev *accel_dev = bank->accel_dev;
+ struct dentry *parent = accel_dev->transport->debug;
+ char name[8];
+
+ snprintf(name, sizeof(name), "bank_%02d", bank->bank_number);
+ bank->bank_debug_dir = debugfs_create_dir(name, parent);
+ if (!bank->bank_debug_dir) {
+ pr_err("QAT: Failed to create bank debug dir.\n");
+ return -EFAULT;
+ }
+
+ bank->bank_debug_cfg = debugfs_create_file("config", S_IRUSR,
+ bank->bank_debug_dir, bank,
+ &adf_bank_debug_fops);
+ if (!bank->bank_debug_cfg) {
+ pr_err("QAT: Failed to create bank debug entry.\n");
+ debugfs_remove(bank->bank_debug_dir);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank)
+{
+ debugfs_remove(bank->bank_debug_cfg);
+ debugfs_remove(bank->bank_debug_dir);
+}
diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h
new file mode 100644
index 000000000000..f854bac276b0
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h
@@ -0,0 +1,118 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_TRANSPORT_INTRN_H
+#define ADF_TRANSPORT_INTRN_H
+
+#include <linux/interrupt.h>
+#include <linux/atomic.h>
+#include <linux/spinlock_types.h>
+#include "adf_transport.h"
+
+struct adf_etr_ring_debug_entry {
+ char ring_name[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ struct dentry *debug;
+};
+
+struct adf_etr_ring_data {
+ void *base_addr;
+ atomic_t *inflights;
+ spinlock_t lock; /* protects ring data struct */
+ adf_callback_fn callback;
+ struct adf_etr_bank_data *bank;
+ dma_addr_t dma_addr;
+ uint16_t head;
+ uint16_t tail;
+ uint8_t ring_number;
+ uint8_t ring_size;
+ uint8_t msg_size;
+ uint8_t reserved;
+ struct adf_etr_ring_debug_entry *ring_debug;
+} __packed;
+
+struct adf_etr_bank_data {
+ struct adf_etr_ring_data rings[ADF_ETR_MAX_RINGS_PER_BANK];
+ struct tasklet_struct resp_hanlder;
+ void __iomem *csr_addr;
+ struct adf_accel_dev *accel_dev;
+ uint32_t irq_coalesc_timer;
+ uint16_t ring_mask;
+ uint16_t irq_mask;
+ spinlock_t lock; /* protects bank data struct */
+ struct dentry *bank_debug_dir;
+ struct dentry *bank_debug_cfg;
+ uint32_t bank_number;
+} __packed;
+
+struct adf_etr_data {
+ struct adf_etr_bank_data *banks;
+ struct dentry *debug;
+};
+
+void adf_response_handler(unsigned long bank_addr);
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+int adf_bank_debugfs_add(struct adf_etr_bank_data *bank);
+void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank);
+int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name);
+void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring);
+#else
+static inline int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
+{
+ return 0;
+}
+
+#define adf_bank_debugfs_rm(bank) do {} while (0)
+
+static inline int adf_ring_debugfs_add(struct adf_etr_ring_data *ring,
+ const char *name)
+{
+ return 0;
+}
+
+#define adf_ring_debugfs_rm(ring) do {} while (0)
+#endif
+#endif
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw.h b/drivers/crypto/qat/qat_common/icp_qat_fw.h
new file mode 100644
index 000000000000..f1e30e24a419
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw.h
@@ -0,0 +1,316 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_FW_H_
+#define _ICP_QAT_FW_H_
+#include <linux/types.h>
+#include "icp_qat_hw.h"
+
+#define QAT_FIELD_SET(flags, val, bitpos, mask) \
+{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \
+ (((val) & (mask)) << (bitpos))) ; }
+
+#define QAT_FIELD_GET(flags, bitpos, mask) \
+ (((flags) >> (bitpos)) & (mask))
+
+#define ICP_QAT_FW_REQ_DEFAULT_SZ 128
+#define ICP_QAT_FW_RESP_DEFAULT_SZ 32
+#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8
+#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF
+#define ICP_QAT_FW_NUM_LONGWORDS_1 1
+#define ICP_QAT_FW_NUM_LONGWORDS_2 2
+#define ICP_QAT_FW_NUM_LONGWORDS_3 3
+#define ICP_QAT_FW_NUM_LONGWORDS_4 4
+#define ICP_QAT_FW_NUM_LONGWORDS_5 5
+#define ICP_QAT_FW_NUM_LONGWORDS_6 6
+#define ICP_QAT_FW_NUM_LONGWORDS_7 7
+#define ICP_QAT_FW_NUM_LONGWORDS_10 10
+#define ICP_QAT_FW_NUM_LONGWORDS_13 13
+#define ICP_QAT_FW_NULL_REQ_SERV_ID 1
+
+enum icp_qat_fw_comn_resp_serv_id {
+ ICP_QAT_FW_COMN_RESP_SERV_NULL,
+ ICP_QAT_FW_COMN_RESP_SERV_CPM_FW,
+ ICP_QAT_FW_COMN_RESP_SERV_DELIMITER
+};
+
+enum icp_qat_fw_comn_request_id {
+ ICP_QAT_FW_COMN_REQ_NULL = 0,
+ ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3,
+ ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4,
+ ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7,
+ ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9,
+ ICP_QAT_FW_COMN_REQ_DELIMITER
+};
+
+struct icp_qat_fw_comn_req_hdr_cd_pars {
+ union {
+ struct {
+ uint64_t content_desc_addr;
+ uint16_t content_desc_resrvd1;
+ uint8_t content_desc_params_sz;
+ uint8_t content_desc_hdr_resrvd2;
+ uint32_t content_desc_resrvd3;
+ } s;
+ struct {
+ uint32_t serv_specif_fields[4];
+ } s1;
+ } u;
+};
+
+struct icp_qat_fw_comn_req_mid {
+ uint64_t opaque_data;
+ uint64_t src_data_addr;
+ uint64_t dest_data_addr;
+ uint32_t src_length;
+ uint32_t dst_length;
+};
+
+struct icp_qat_fw_comn_req_cd_ctrl {
+ uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5];
+};
+
+struct icp_qat_fw_comn_req_hdr {
+ uint8_t resrvd1;
+ uint8_t service_cmd_id;
+ uint8_t service_type;
+ uint8_t hdr_flags;
+ uint16_t serv_specif_flags;
+ uint16_t comn_req_flags;
+};
+
+struct icp_qat_fw_comn_req_rqpars {
+ uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13];
+};
+
+struct icp_qat_fw_comn_req {
+ struct icp_qat_fw_comn_req_hdr comn_hdr;
+ struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+ struct icp_qat_fw_comn_req_mid comn_mid;
+ struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+ struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+struct icp_qat_fw_comn_error {
+ uint8_t xlat_err_code;
+ uint8_t cmp_err_code;
+};
+
+struct icp_qat_fw_comn_resp_hdr {
+ uint8_t resrvd1;
+ uint8_t service_id;
+ uint8_t response_type;
+ uint8_t hdr_flags;
+ struct icp_qat_fw_comn_error comn_error;
+ uint8_t comn_status;
+ uint8_t cmd_id;
+};
+
+struct icp_qat_fw_comn_resp {
+ struct icp_qat_fw_comn_resp_hdr comn_hdr;
+ uint64_t opaque_data;
+ uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1
+#define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7
+#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
+ icp_qat_fw_comn_req_hdr_t.service_type
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \
+ icp_qat_fw_comn_req_hdr_t.service_type = val
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \
+ icp_qat_fw_comn_req_hdr_t.service_cmd_id
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \
+ icp_qat_fw_comn_req_hdr_t.service_cmd_id = val
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \
+ ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags)
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \
+ ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \
+ QAT_FIELD_GET(hdr_flags, \
+ ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \
+ (hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \
+ QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+ ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \
+ (((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
+ ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
+
+#define QAT_COMN_PTR_TYPE_BITPOS 0
+#define QAT_COMN_PTR_TYPE_MASK 0x1
+#define QAT_COMN_CD_FLD_TYPE_BITPOS 1
+#define QAT_COMN_CD_FLD_TYPE_MASK 0x1
+#define QAT_COMN_PTR_TYPE_FLAT 0x0
+#define QAT_COMN_PTR_TYPE_SGL 0x1
+#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0
+#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1
+
+#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
+ ((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
+ | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+ QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \
+ QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+ QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4
+#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0
+#define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0
+#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F
+
+#define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \
+ ((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+ >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+ { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+ ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK)); }
+
+#define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \
+ (((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+ { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+ ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); }
+
+#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
+#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
+#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
+#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
+#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
+
+#define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \
+ ((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \
+ QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \
+ (((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \
+ QAT_COMN_RESP_CMP_STATUS_BITPOS) | \
+ (((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \
+ QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \
+ (((eolb) & QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \
+ QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS))
+
+#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \
+ QAT_COMN_RESP_CRYPTO_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \
+ QAT_COMN_RESP_CMP_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \
+ QAT_COMN_RESP_XLAT_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \
+ QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK)
+
+#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
+#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1
+#define ERR_CODE_NO_ERROR 0
+#define ERR_CODE_INVALID_BLOCK_TYPE -1
+#define ERR_CODE_NO_MATCH_ONES_COMP -2
+#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3
+#define ERR_CODE_INCOMPLETE_LEN -4
+#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5
+#define ERR_CODE_RPT_GT_SPEC_LEN -6
+#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7
+#define ERR_CODE_INV_DIS_CODE_LEN -8
+#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9
+#define ERR_CODE_DIS_TOO_FAR_BACK -10
+#define ERR_CODE_OVERFLOW_ERROR -11
+#define ERR_CODE_SOFT_ERROR -12
+#define ERR_CODE_FATAL_ERROR -13
+#define ERR_CODE_SSM_ERROR -14
+#define ERR_CODE_ENDPOINT_ERROR -15
+
+enum icp_qat_fw_slice {
+ ICP_QAT_FW_SLICE_NULL = 0,
+ ICP_QAT_FW_SLICE_CIPHER = 1,
+ ICP_QAT_FW_SLICE_AUTH = 2,
+ ICP_QAT_FW_SLICE_DRAM_RD = 3,
+ ICP_QAT_FW_SLICE_DRAM_WR = 4,
+ ICP_QAT_FW_SLICE_COMP = 5,
+ ICP_QAT_FW_SLICE_XLAT = 6,
+ ICP_QAT_FW_SLICE_DELIMITER
+};
+#endif
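A self-contained sketch of the generic field helpers defined at the top of this header (user-space C, macros copied verbatim); the bit position and mask below match the common-header valid flag, but the values are only examples:

#include <assert.h>
#include <stdint.h>

#define QAT_FIELD_SET(flags, val, bitpos, mask) \
{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \
	(((val) & (mask)) << (bitpos))) ; }

#define QAT_FIELD_GET(flags, bitpos, mask) \
	(((flags) >> (bitpos)) & (mask))

int main(void)
{
	uint8_t hdr_flags = 0;

	/* set the 1-bit field at bit position 7 (the "valid" flag slot) */
	QAT_FIELD_SET(hdr_flags, 1, 7, 0x1);
	assert(hdr_flags == 0x80);
	assert(QAT_FIELD_GET(hdr_flags, 7, 0x1) == 1);

	/* clearing uses the same macro with val == 0 */
	QAT_FIELD_SET(hdr_flags, 0, 7, 0x1);
	assert(hdr_flags == 0x00);
	return 0;
}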
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
new file mode 100644
index 000000000000..72a59faa9005
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
@@ -0,0 +1,131 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_FW_INIT_ADMIN_H_
+#define _ICP_QAT_FW_INIT_ADMIN_H_
+
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_init_admin_cmd_id {
+ ICP_QAT_FW_INIT_ME = 0,
+ ICP_QAT_FW_TRNG_ENABLE = 1,
+ ICP_QAT_FW_TRNG_DISABLE = 2,
+ ICP_QAT_FW_CONSTANTS_CFG = 3,
+ ICP_QAT_FW_STATUS_GET = 4,
+ ICP_QAT_FW_COUNTERS_GET = 5,
+ ICP_QAT_FW_LOOPBACK = 6,
+ ICP_QAT_FW_HEARTBEAT_SYNC = 7,
+ ICP_QAT_FW_HEARTBEAT_GET = 8
+};
+
+enum icp_qat_fw_init_admin_resp_status {
+ ICP_QAT_FW_INIT_RESP_STATUS_SUCCESS = 0,
+ ICP_QAT_FW_INIT_RESP_STATUS_FAIL
+};
+
+struct icp_qat_fw_init_admin_req {
+ uint16_t init_cfg_sz;
+ uint8_t resrvd1;
+ uint8_t init_admin_cmd_id;
+ uint32_t resrvd2;
+ uint64_t opaque_data;
+ uint64_t init_cfg_ptr;
+ uint64_t resrvd3;
+};
+
+struct icp_qat_fw_init_admin_resp_hdr {
+ uint8_t flags;
+ uint8_t resrvd1;
+ uint8_t status;
+ uint8_t init_admin_cmd_id;
+};
+
+struct icp_qat_fw_init_admin_resp_pars {
+ union {
+ uint32_t resrvd1[ICP_QAT_FW_NUM_LONGWORDS_4];
+ struct {
+ uint32_t version_patch_num;
+ uint8_t context_id;
+ uint8_t ae_id;
+ uint16_t resrvd1;
+ uint64_t resrvd2;
+ } s1;
+ struct {
+ uint64_t req_rec_count;
+ uint64_t resp_sent_count;
+ } s2;
+ } u;
+};
+
+struct icp_qat_fw_init_admin_resp {
+ struct icp_qat_fw_init_admin_resp_hdr init_resp_hdr;
+ union {
+ uint32_t resrvd2;
+ struct {
+ uint16_t version_minor_num;
+ uint16_t version_major_num;
+ } s;
+ } u;
+ uint64_t opaque_data;
+ struct icp_qat_fw_init_admin_resp_pars init_resp_pars;
+};
+
+#define ICP_QAT_FW_COMN_HEARTBEAT_OK 0
+#define ICP_QAT_FW_COMN_HEARTBEAT_BLOCKED 1
+#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS 0
+#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_STATUS_RESRVD_FLD_MASK 0xFE
+#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_GET(hdr_t) \
+ ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(hdr_t.flags)
+
+#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_SET(hdr_t, val) \
+ ICP_QAT_FW_COMN_HEARTBEAT_FLAG_SET(hdr_t, val)
+
+#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, \
+ ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK)
+#endif
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h
new file mode 100644
index 000000000000..c8d26697e8ea
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h
@@ -0,0 +1,404 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_FW_LA_H_
+#define _ICP_QAT_FW_LA_H_
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_la_cmd_id {
+ ICP_QAT_FW_LA_CMD_CIPHER = 0,
+ ICP_QAT_FW_LA_CMD_AUTH = 1,
+ ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2,
+ ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3,
+ ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4,
+ ICP_QAT_FW_LA_CMD_TRNG_TEST = 5,
+ ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6,
+ ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7,
+ ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8,
+ ICP_QAT_FW_LA_CMD_MGF1 = 9,
+ ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10,
+ ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11,
+ ICP_QAT_FW_LA_CMD_DELIMITER = 12
+};
+
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+#define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+
+struct icp_qat_fw_la_bulk_req {
+ struct icp_qat_fw_comn_req_hdr comn_hdr;
+ struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+ struct icp_qat_fw_comn_req_mid comn_mid;
+ struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+ struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1
+#define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO 1
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1
+#define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11
+#define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1
+#define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0
+#define QAT_LA_DIGEST_IN_BUFFER_BITPOS 10
+#define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1
+#define ICP_QAT_FW_LA_SNOW_3G_PROTO 4
+#define ICP_QAT_FW_LA_GCM_PROTO 2
+#define ICP_QAT_FW_LA_CCM_PROTO 1
+#define ICP_QAT_FW_LA_NO_PROTO 0
+#define QAT_LA_PROTO_BITPOS 7
+#define QAT_LA_PROTO_MASK 0x7
+#define ICP_QAT_FW_LA_CMP_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0
+#define QAT_LA_CMP_AUTH_RES_BITPOS 6
+#define QAT_LA_CMP_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_RET_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0
+#define QAT_LA_RET_AUTH_RES_BITPOS 5
+#define QAT_LA_RET_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_UPDATE_STATE 1
+#define ICP_QAT_FW_LA_NO_UPDATE_STATE 0
+#define QAT_LA_UPDATE_STATE_BITPOS 4
+#define QAT_LA_UPDATE_STATE_MASK 0x1
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1
+#define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0
+#define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1
+#define QAT_LA_CIPH_IV_FLD_BITPOS 2
+#define QAT_LA_CIPH_IV_FLD_MASK 0x1
+#define ICP_QAT_FW_LA_PARTIAL_NONE 0
+#define ICP_QAT_FW_LA_PARTIAL_START 1
+#define ICP_QAT_FW_LA_PARTIAL_MID 3
+#define ICP_QAT_FW_LA_PARTIAL_END 2
+#define QAT_LA_PARTIAL_BITPOS 0
+#define QAT_LA_PARTIAL_MASK 0x3
+#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
+ cmp_auth, ret_auth, update_state, \
+ ciph_iv, ciphcfg, partial) \
+ (((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \
+ QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \
+ ((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \
+ QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \
+ ((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \
+ QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \
+ ((proto & QAT_LA_PROTO_MASK) << \
+ QAT_LA_PROTO_BITPOS) | \
+ ((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \
+ QAT_LA_CMP_AUTH_RES_BITPOS) | \
+ ((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \
+ QAT_LA_RET_AUTH_RES_BITPOS) | \
+ ((update_state & QAT_LA_UPDATE_STATE_MASK) << \
+ QAT_LA_UPDATE_STATE_BITPOS) | \
+ ((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \
+ QAT_LA_CIPH_IV_FLD_BITPOS) | \
+ ((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \
+ QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \
+ ((partial & QAT_LA_PARTIAL_MASK) << \
+ QAT_LA_PARTIAL_BITPOS))
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \
+ QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+ QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+ QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+ QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \
+ QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \
+ QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+ QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \
+ QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \
+ QAT_LA_PARTIAL_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \
+ QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+ QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+ QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+ QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \
+ QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \
+ QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \
+ QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+ QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \
+ QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
+ QAT_LA_PARTIAL_MASK)
+
+struct icp_qat_fw_cipher_req_hdr_cd_pars {
+ union {
+ struct {
+ uint64_t content_desc_addr;
+ uint16_t content_desc_resrvd1;
+ uint8_t content_desc_params_sz;
+ uint8_t content_desc_hdr_resrvd2;
+ uint32_t content_desc_resrvd3;
+ } s;
+ struct {
+ uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+ } s1;
+ } u;
+};
+
+struct icp_qat_fw_cipher_auth_req_hdr_cd_pars {
+ union {
+ struct {
+ uint64_t content_desc_addr;
+ uint16_t content_desc_resrvd1;
+ uint8_t content_desc_params_sz;
+ uint8_t content_desc_hdr_resrvd2;
+ uint32_t content_desc_resrvd3;
+ } s;
+ struct {
+ uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+ } sl;
+ } u;
+};
+
+struct icp_qat_fw_cipher_cd_ctrl_hdr {
+ uint8_t cipher_state_sz;
+ uint8_t cipher_key_sz;
+ uint8_t cipher_cfg_offset;
+ uint8_t next_curr_id;
+ uint8_t cipher_padding_sz;
+ uint8_t resrvd1;
+ uint16_t resrvd2;
+ uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3];
+};
+
+struct icp_qat_fw_auth_cd_ctrl_hdr {
+ uint32_t resrvd1;
+ uint8_t resrvd2;
+ uint8_t hash_flags;
+ uint8_t hash_cfg_offset;
+ uint8_t next_curr_id;
+ uint8_t resrvd3;
+ uint8_t outer_prefix_sz;
+ uint8_t final_sz;
+ uint8_t inner_res_sz;
+ uint8_t resrvd4;
+ uint8_t inner_state1_sz;
+ uint8_t inner_state2_offset;
+ uint8_t inner_state2_sz;
+ uint8_t outer_config_offset;
+ uint8_t outer_state1_sz;
+ uint8_t outer_res_sz;
+ uint8_t outer_prefix_offset;
+};
+
+struct icp_qat_fw_cipher_auth_cd_ctrl_hdr {
+ uint8_t cipher_state_sz;
+ uint8_t cipher_key_sz;
+ uint8_t cipher_cfg_offset;
+ uint8_t next_curr_id_cipher;
+ uint8_t cipher_padding_sz;
+ uint8_t hash_flags;
+ uint8_t hash_cfg_offset;
+ uint8_t next_curr_id_auth;
+ uint8_t resrvd1;
+ uint8_t outer_prefix_sz;
+ uint8_t final_sz;
+ uint8_t inner_res_sz;
+ uint8_t resrvd2;
+ uint8_t inner_state1_sz;
+ uint8_t inner_state2_offset;
+ uint8_t inner_state2_sz;
+ uint8_t outer_config_offset;
+ uint8_t outer_state1_sz;
+ uint8_t outer_res_sz;
+ uint8_t outer_prefix_offset;
+};
+
+#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1
+#define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0
+#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX 240
+#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \
+ (sizeof(struct icp_qat_fw_la_cipher_req_params))
+#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0)
+
+struct icp_qat_fw_la_cipher_req_params {
+ uint32_t cipher_offset;
+ uint32_t cipher_length;
+ union {
+ uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+ struct {
+ uint64_t cipher_IV_ptr;
+ uint64_t resrvd1;
+ } s;
+ } u;
+};
+
+struct icp_qat_fw_la_auth_req_params {
+ uint32_t auth_off;
+ uint32_t auth_len;
+ union {
+ uint64_t auth_partial_st_prefix;
+ uint64_t aad_adr;
+ } u1;
+ uint64_t auth_res_addr;
+ union {
+ uint8_t inner_prefix_sz;
+ uint8_t aad_sz;
+ } u2;
+ uint8_t resrvd1;
+ uint8_t hash_state_sz;
+ uint8_t auth_res_sz;
+} __packed;
+
+struct icp_qat_fw_la_auth_req_params_resrvd_flds {
+ uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6];
+ union {
+ uint8_t inner_prefix_sz;
+ uint8_t aad_sz;
+ } u2;
+ uint8_t resrvd1;
+ uint16_t resrvd2;
+};
+
+struct icp_qat_fw_la_resp {
+ struct icp_qat_fw_comn_resp_hdr comn_resp;
+ uint64_t opaque_data;
+ uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \
+ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \
+ ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+ ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK)); }
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \
+ (((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+ ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); }
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \
+ ((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+ >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+ ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK)); }
+
+#define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \
+ (((cd_ctrl_hdr_t)->next_curr_id_auth) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+ ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); }
+
+#endif
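A plain-C illustration (not driver code) of the nibble packing that the *_CURR_ID_SET and *_NEXT_ID_SET macros above perform on the shared next_curr_id bytes; the slice values CIPHER = 1 and AUTH = 2 come from enum icp_qat_fw_slice in icp_qat_fw.h:

#include <assert.h>
#include <stdint.h>

#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4
#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0
#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F

int main(void)
{
	uint8_t next_curr_id_cipher = 0;

	/* current slice = ICP_QAT_FW_SLICE_CIPHER (1): low nibble */
	next_curr_id_cipher = (next_curr_id_cipher &
			       ICP_QAT_FW_COMN_NEXT_ID_MASK) |
			      (1 & ICP_QAT_FW_COMN_CURR_ID_MASK);

	/* next slice = ICP_QAT_FW_SLICE_AUTH (2): high nibble */
	next_curr_id_cipher = (next_curr_id_cipher &
			       ICP_QAT_FW_COMN_CURR_ID_MASK) |
			      ((2 << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) &
			       ICP_QAT_FW_COMN_NEXT_ID_MASK);

	assert(next_curr_id_cipher == 0x21);
	return 0;
}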
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
new file mode 100644
index 000000000000..5e1aa40c0404
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
@@ -0,0 +1,78 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef __ICP_QAT_FW_LOADER_HANDLE_H__
+#define __ICP_QAT_FW_LOADER_HANDLE_H__
+#include "icp_qat_uclo.h"
+
+struct icp_qat_fw_loader_ae_data {
+ unsigned int state;
+ unsigned int ustore_size;
+ unsigned int free_addr;
+ unsigned int free_size;
+ unsigned int live_ctx_mask;
+};
+
+struct icp_qat_fw_loader_hal_handle {
+ struct icp_qat_fw_loader_ae_data aes[ICP_QAT_UCLO_MAX_AE];
+ unsigned int ae_mask;
+ unsigned int slice_mask;
+ unsigned int revision_id;
+ unsigned int ae_max_num;
+ unsigned int upc_mask;
+ unsigned int max_ustore;
+};
+
+struct icp_qat_fw_loader_handle {
+ struct icp_qat_fw_loader_hal_handle *hal_handle;
+ void *obj_handle;
+ void __iomem *hal_sram_addr_v;
+ void __iomem *hal_cap_g_ctl_csr_addr_v;
+ void __iomem *hal_cap_ae_xfer_csr_addr_v;
+ void __iomem *hal_cap_ae_local_csr_addr_v;
+ void __iomem *hal_ep_csr_addr_v;
+};
+#endif
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hal.h b/drivers/crypto/qat/qat_common/icp_qat_hal.h
new file mode 100644
index 000000000000..85b6d241ea82
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_hal.h
@@ -0,0 +1,125 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef __ICP_QAT_HAL_H
+#define __ICP_QAT_HAL_H
+#include "icp_qat_fw_loader_handle.h"
+
+enum hal_global_csr {
+ MISC_CONTROL = 0x04,
+ ICP_RESET = 0x0c,
+ ICP_GLOBAL_CLK_ENABLE = 0x50
+};
+
+enum hal_ae_csr {
+ USTORE_ADDRESS = 0x000,
+ USTORE_DATA_LOWER = 0x004,
+ USTORE_DATA_UPPER = 0x008,
+ ALU_OUT = 0x010,
+ CTX_ARB_CNTL = 0x014,
+ CTX_ENABLES = 0x018,
+ CC_ENABLE = 0x01c,
+ CSR_CTX_POINTER = 0x020,
+ CTX_STS_INDIRECT = 0x040,
+ ACTIVE_CTX_STATUS = 0x044,
+ CTX_SIG_EVENTS_INDIRECT = 0x048,
+ CTX_SIG_EVENTS_ACTIVE = 0x04c,
+ CTX_WAKEUP_EVENTS_INDIRECT = 0x050,
+ LM_ADDR_0_INDIRECT = 0x060,
+ LM_ADDR_1_INDIRECT = 0x068,
+ INDIRECT_LM_ADDR_0_BYTE_INDEX = 0x0e0,
+ INDIRECT_LM_ADDR_1_BYTE_INDEX = 0x0e8,
+ FUTURE_COUNT_SIGNAL_INDIRECT = 0x078,
+ TIMESTAMP_LOW = 0x0c0,
+ TIMESTAMP_HIGH = 0x0c4,
+ PROFILE_COUNT = 0x144,
+ SIGNATURE_ENABLE = 0x150,
+ AE_MISC_CONTROL = 0x160,
+ LOCAL_CSR_STATUS = 0x180,
+};
+
+#define UA_ECS (0x1 << 31)
+#define ACS_ABO_BITPOS 31
+#define ACS_ACNO 0x7
+#define CE_ENABLE_BITPOS 0x8
+#define CE_LMADDR_0_GLOBAL_BITPOS 16
+#define CE_LMADDR_1_GLOBAL_BITPOS 17
+#define CE_NN_MODE_BITPOS 20
+#define CE_REG_PAR_ERR_BITPOS 25
+#define CE_BREAKPOINT_BITPOS 27
+#define CE_CNTL_STORE_PARITY_ERROR_BITPOS 29
+#define CE_INUSE_CONTEXTS_BITPOS 31
+#define CE_NN_MODE (0x1 << CE_NN_MODE_BITPOS)
+#define CE_INUSE_CONTEXTS (0x1 << CE_INUSE_CONTEXTS_BITPOS)
+#define XCWE_VOLUNTARY (0x1)
+#define LCS_STATUS (0x1)
+#define MMC_SHARE_CS_BITPOS 2
+#define GLOBAL_CSR 0xA00
+
+#define SET_CAP_CSR(handle, csr, val) \
+ ADF_CSR_WR(handle->hal_cap_g_ctl_csr_addr_v, csr, val)
+#define GET_CAP_CSR(handle, csr) \
+ ADF_CSR_RD(handle->hal_cap_g_ctl_csr_addr_v, csr)
+#define SET_GLB_CSR(handle, csr, val) SET_CAP_CSR(handle, csr + GLOBAL_CSR, val)
+#define GET_GLB_CSR(handle, csr) GET_CAP_CSR(handle, GLOBAL_CSR + csr)
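+/*
+ * Note on the macros below: each AE exposes its local CSRs through a 4KB
+ * window (the AE index selects the window, the CSR offset is masked to 10
+ * bits); transfer registers get a similar per-AE window with a 32-bit
+ * register stride.
+ */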
+#define AE_CSR(handle, ae) \
+ (handle->hal_cap_ae_local_csr_addr_v + \
+ ((ae & handle->hal_handle->ae_mask) << 12))
+#define AE_CSR_ADDR(handle, ae, csr) (AE_CSR(handle, ae) + (0x3ff & csr))
+#define SET_AE_CSR(handle, ae, csr, val) \
+ ADF_CSR_WR(AE_CSR_ADDR(handle, ae, csr), 0, val)
+#define GET_AE_CSR(handle, ae, csr) ADF_CSR_RD(AE_CSR_ADDR(handle, ae, csr), 0)
+#define AE_XFER(handle, ae) \
+ (handle->hal_cap_ae_xfer_csr_addr_v + \
+ ((ae & handle->hal_handle->ae_mask) << 12))
+#define AE_XFER_ADDR(handle, ae, reg) (AE_XFER(handle, ae) + \
+ ((reg & 0xff) << 2))
+#define SET_AE_XFER(handle, ae, reg, val) \
+ ADF_CSR_WR(AE_XFER_ADDR(handle, ae, reg), 0, val)
+#define SRAM_WRITE(handle, addr, val) \
+ ADF_CSR_WR(handle->hal_sram_addr_v, addr, val)
+#define SRAM_READ(handle, addr) ADF_CSR_RD(handle->hal_sram_addr_v, addr)
+#endif
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/qat/qat_common/icp_qat_hw.h
new file mode 100644
index 000000000000..5031f8c10d75
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_hw.h
@@ -0,0 +1,305 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_HW_H_
+#define _ICP_QAT_HW_H_
+
+enum icp_qat_hw_ae_id {
+ ICP_QAT_HW_AE_0 = 0,
+ ICP_QAT_HW_AE_1 = 1,
+ ICP_QAT_HW_AE_2 = 2,
+ ICP_QAT_HW_AE_3 = 3,
+ ICP_QAT_HW_AE_4 = 4,
+ ICP_QAT_HW_AE_5 = 5,
+ ICP_QAT_HW_AE_6 = 6,
+ ICP_QAT_HW_AE_7 = 7,
+ ICP_QAT_HW_AE_8 = 8,
+ ICP_QAT_HW_AE_9 = 9,
+ ICP_QAT_HW_AE_10 = 10,
+ ICP_QAT_HW_AE_11 = 11,
+ ICP_QAT_HW_AE_DELIMITER = 12
+};
+
+enum icp_qat_hw_qat_id {
+ ICP_QAT_HW_QAT_0 = 0,
+ ICP_QAT_HW_QAT_1 = 1,
+ ICP_QAT_HW_QAT_2 = 2,
+ ICP_QAT_HW_QAT_3 = 3,
+ ICP_QAT_HW_QAT_4 = 4,
+ ICP_QAT_HW_QAT_5 = 5,
+ ICP_QAT_HW_QAT_DELIMITER = 6
+};
+
+enum icp_qat_hw_auth_algo {
+ ICP_QAT_HW_AUTH_ALGO_NULL = 0,
+ ICP_QAT_HW_AUTH_ALGO_SHA1 = 1,
+ ICP_QAT_HW_AUTH_ALGO_MD5 = 2,
+ ICP_QAT_HW_AUTH_ALGO_SHA224 = 3,
+ ICP_QAT_HW_AUTH_ALGO_SHA256 = 4,
+ ICP_QAT_HW_AUTH_ALGO_SHA384 = 5,
+ ICP_QAT_HW_AUTH_ALGO_SHA512 = 6,
+ ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7,
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8,
+ ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9,
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10,
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11,
+ ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12,
+ ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13,
+ ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14,
+ ICP_QAT_HW_AUTH_RESERVED_1 = 15,
+ ICP_QAT_HW_AUTH_RESERVED_2 = 16,
+ ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17,
+ ICP_QAT_HW_AUTH_RESERVED_3 = 18,
+ ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19,
+ ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20
+};
+
+enum icp_qat_hw_auth_mode {
+ ICP_QAT_HW_AUTH_MODE0 = 0,
+ ICP_QAT_HW_AUTH_MODE1 = 1,
+ ICP_QAT_HW_AUTH_MODE2 = 2,
+ ICP_QAT_HW_AUTH_MODE_DELIMITER = 3
+};
+
+struct icp_qat_hw_auth_config {
+ uint32_t config;
+ uint32_t reserved;
+};
+
+#define QAT_AUTH_MODE_BITPOS 4
+#define QAT_AUTH_MODE_MASK 0xF
+#define QAT_AUTH_ALGO_BITPOS 0
+#define QAT_AUTH_ALGO_MASK 0xF
+#define QAT_AUTH_CMP_BITPOS 8
+#define QAT_AUTH_CMP_MASK 0x7F
+#define QAT_AUTH_SHA3_PADDING_BITPOS 16
+#define QAT_AUTH_SHA3_PADDING_MASK 0x1
+#define QAT_AUTH_ALGO_SHA3_BITPOS 22
+#define QAT_AUTH_ALGO_SHA3_MASK 0x3
+#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
+ (((mode & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
+ ((algo & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
+ (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \
+ QAT_AUTH_ALGO_SHA3_BITPOS) | \
+ (((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \
+ (algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 1 : 0) \
+ & QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \
+ ((cmp_len & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
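+/*
+ * Illustrative expansion: ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+ * ICP_QAT_HW_AUTH_ALGO_SHA256, 32) packs the mode into bits 4-7, the
+ * algorithm into bits 0-3 and the compare length into bits 8-14, giving
+ * 0x2014; the SHA3 fields stay zero for non-SHA3 algorithms.
+ */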
+
+struct icp_qat_hw_auth_counter {
+ __be32 counter;
+ uint32_t reserved;
+};
+
+#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF
+#define QAT_AUTH_COUNT_BITPOS 0
+#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \
+ (((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS)
+
+struct icp_qat_hw_auth_setup {
+ struct icp_qat_hw_auth_config auth_config;
+ struct icp_qat_hw_auth_counter auth_counter;
+};
+
+#define QAT_HW_DEFAULT_ALIGNMENT 8
+#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & (~((n) - 1)))
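+/*
+ * QAT_HW_ROUND_UP rounds val up to a multiple of n, where n must be a
+ * power of two; e.g. QAT_HW_ROUND_UP(20, 8) == 24.
+ */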
+#define ICP_QAT_HW_NULL_STATE1_SZ 32
+#define ICP_QAT_HW_MD5_STATE1_SZ 16
+#define ICP_QAT_HW_SHA1_STATE1_SZ 20
+#define ICP_QAT_HW_SHA224_STATE1_SZ 32
+#define ICP_QAT_HW_SHA256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA384_STATE1_SZ 64
+#define ICP_QAT_HW_SHA512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
+#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
+#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_F9_STATE1_SZ 32
+#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16
+#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
+#define ICP_QAT_HW_NULL_STATE2_SZ 32
+#define ICP_QAT_HW_MD5_STATE2_SZ 16
+#define ICP_QAT_HW_SHA1_STATE2_SZ 20
+#define ICP_QAT_HW_SHA224_STATE2_SZ 32
+#define ICP_QAT_HW_SHA256_STATE2_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0
+#define ICP_QAT_HW_SHA384_STATE2_SZ 64
+#define ICP_QAT_HW_SHA512_STATE2_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0
+#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
+#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
+#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16
+#define ICP_QAT_HW_F9_IK_SZ 16
+#define ICP_QAT_HW_F9_FK_SZ 16
+#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \
+ ICP_QAT_HW_F9_FK_SZ)
+#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32
+#define ICP_QAT_HW_GALOIS_H_SZ 16
+#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8
+#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16
+
+struct icp_qat_hw_auth_sha512 {
+ struct icp_qat_hw_auth_setup inner_setup;
+ uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ];
+ struct icp_qat_hw_auth_setup outer_setup;
+ uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ];
+};
+
+struct icp_qat_hw_auth_algo_blk {
+ struct icp_qat_hw_auth_sha512 sha;
+};
+
+#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0
+#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF
+
+enum icp_qat_hw_cipher_algo {
+ ICP_QAT_HW_CIPHER_ALGO_NULL = 0,
+ ICP_QAT_HW_CIPHER_ALGO_DES = 1,
+ ICP_QAT_HW_CIPHER_ALGO_3DES = 2,
+ ICP_QAT_HW_CIPHER_ALGO_AES128 = 3,
+ ICP_QAT_HW_CIPHER_ALGO_AES192 = 4,
+ ICP_QAT_HW_CIPHER_ALGO_AES256 = 5,
+ ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6,
+ ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7,
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8,
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9,
+ ICP_QAT_HW_CIPHER_DELIMITER = 10
+};
+
+enum icp_qat_hw_cipher_mode {
+ ICP_QAT_HW_CIPHER_ECB_MODE = 0,
+ ICP_QAT_HW_CIPHER_CBC_MODE = 1,
+ ICP_QAT_HW_CIPHER_CTR_MODE = 2,
+ ICP_QAT_HW_CIPHER_F8_MODE = 3,
+ ICP_QAT_HW_CIPHER_XTS_MODE = 6,
+ ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7
+};
+
+struct icp_qat_hw_cipher_config {
+ uint32_t val;
+ uint32_t reserved;
+};
+
+enum icp_qat_hw_cipher_dir {
+ ICP_QAT_HW_CIPHER_ENCRYPT = 0,
+ ICP_QAT_HW_CIPHER_DECRYPT = 1,
+};
+
+enum icp_qat_hw_cipher_convert {
+ ICP_QAT_HW_CIPHER_NO_CONVERT = 0,
+ ICP_QAT_HW_CIPHER_KEY_CONVERT = 1,
+};
+
+#define QAT_CIPHER_MODE_BITPOS 4
+#define QAT_CIPHER_MODE_MASK 0xF
+#define QAT_CIPHER_ALGO_BITPOS 0
+#define QAT_CIPHER_ALGO_MASK 0xF
+#define QAT_CIPHER_CONVERT_BITPOS 9
+#define QAT_CIPHER_CONVERT_MASK 0x1
+#define QAT_CIPHER_DIR_BITPOS 8
+#define QAT_CIPHER_DIR_MASK 0x1
+#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2
+#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2
+#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \
+ (((mode & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \
+ ((algo & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \
+ ((convert & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \
+ ((dir & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS))
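+/*
+ * Illustrative expansion: ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+ * ICP_QAT_HW_CIPHER_CBC_MODE, ICP_QAT_HW_CIPHER_ALGO_AES128,
+ * ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_ENCRYPT) yields 0x13
+ * (mode in bits 4-7, algo in bits 0-3); the decrypt/key-convert variant
+ * additionally sets bits 8 and 9, giving 0x313.
+ */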
+#define ICP_QAT_HW_DES_BLK_SZ 8
+#define ICP_QAT_HW_3DES_BLK_SZ 8
+#define ICP_QAT_HW_NULL_BLK_SZ 8
+#define ICP_QAT_HW_AES_BLK_SZ 16
+#define ICP_QAT_HW_KASUMI_BLK_SZ 8
+#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8
+#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8
+#define ICP_QAT_HW_NULL_KEY_SZ 256
+#define ICP_QAT_HW_DES_KEY_SZ 8
+#define ICP_QAT_HW_3DES_KEY_SZ 24
+#define ICP_QAT_HW_AES_128_KEY_SZ 16
+#define ICP_QAT_HW_AES_192_KEY_SZ 24
+#define ICP_QAT_HW_AES_256_KEY_SZ 32
+#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+ QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+ QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_KASUMI_KEY_SZ 16
+#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_ARC4_KEY_SZ 256
+#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16
+#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2
+#define INIT_SHRAM_CONSTANTS_TABLE_SZ 1024
+
+struct icp_qat_hw_cipher_aes256_f8 {
+ struct icp_qat_hw_cipher_config cipher_config;
+ uint8_t key[ICP_QAT_HW_AES_256_F8_KEY_SZ];
+};
+
+struct icp_qat_hw_cipher_algo_blk {
+ struct icp_qat_hw_cipher_aes256_f8 aes;
+};
+#endif
diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
new file mode 100644
index 000000000000..2132a8cbc4ec
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
@@ -0,0 +1,377 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef __ICP_QAT_UCLO_H__
+#define __ICP_QAT_UCLO_H__
+
+#define ICP_QAT_AC_C_CPU_TYPE 0x00400000
+#define ICP_QAT_UCLO_MAX_AE 12
+#define ICP_QAT_UCLO_MAX_CTX 8
+#define ICP_QAT_UCLO_MAX_UIMAGE (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX)
+#define ICP_QAT_UCLO_MAX_USTORE 0x4000
+#define ICP_QAT_UCLO_MAX_XFER_REG 128
+#define ICP_QAT_UCLO_MAX_GPR_REG 128
+#define ICP_QAT_UCLO_MAX_NN_REG 128
+#define ICP_QAT_UCLO_MAX_LMEM_REG 1024
+#define ICP_QAT_UCLO_AE_ALL_CTX 0xff
+#define ICP_QAT_UOF_OBJID_LEN 8
+#define ICP_QAT_UOF_FID 0xc6c2
+#define ICP_QAT_UOF_MAJVER 0x4
+#define ICP_QAT_UOF_MINVER 0x11
+#define ICP_QAT_UOF_NN_MODE_NOTCARE 0xff
+#define ICP_QAT_UOF_OBJS "UOF_OBJS"
+#define ICP_QAT_UOF_STRT "UOF_STRT"
+#define ICP_QAT_UOF_GTID "UOF_GTID"
+#define ICP_QAT_UOF_IMAG "UOF_IMAG"
+#define ICP_QAT_UOF_IMEM "UOF_IMEM"
+#define ICP_QAT_UOF_MSEG "UOF_MSEG"
+#define ICP_QAT_UOF_LOCAL_SCOPE 1
+#define ICP_QAT_UOF_INIT_EXPR 0
+#define ICP_QAT_UOF_INIT_REG 1
+#define ICP_QAT_UOF_INIT_REG_CTX 2
+#define ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP 3
+
+#define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf)
+#define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf)
+#define ICP_QAT_SHARED_USTORE_MODE(ae_mode) (((ae_mode) >> 0xb) & 0x1)
+#define RELOADABLE_CTX_SHARED_MODE(ae_mode) (((ae_mode) >> 0xc) & 0x1)
+
+#define ICP_QAT_LOC_MEM0_MODE(ae_mode) (((ae_mode) >> 0x8) & 0x1)
+#define ICP_QAT_LOC_MEM1_MODE(ae_mode) (((ae_mode) >> 0x9) & 0x1)
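+/*
+ * The ae_mode word packs several settings: bits 0-3 context mode, bits 4-7
+ * next-neighbour mode, bits 8 and 9 the local memory 0/1 global flags,
+ * bit 11 shared ustore mode and bit 12 reloadable shared-context mode.
+ */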
+
+enum icp_qat_uof_mem_region {
+ ICP_QAT_UOF_SRAM_REGION = 0x0,
+ ICP_QAT_UOF_LMEM_REGION = 0x3,
+ ICP_QAT_UOF_UMEM_REGION = 0x5
+};
+
+enum icp_qat_uof_regtype {
+ ICP_NO_DEST,
+ ICP_GPA_REL,
+ ICP_GPA_ABS,
+ ICP_GPB_REL,
+ ICP_GPB_ABS,
+ ICP_SR_REL,
+ ICP_SR_RD_REL,
+ ICP_SR_WR_REL,
+ ICP_SR_ABS,
+ ICP_SR_RD_ABS,
+ ICP_SR_WR_ABS,
+ ICP_DR_REL,
+ ICP_DR_RD_REL,
+ ICP_DR_WR_REL,
+ ICP_DR_ABS,
+ ICP_DR_RD_ABS,
+ ICP_DR_WR_ABS,
+ ICP_LMEM,
+ ICP_LMEM0,
+ ICP_LMEM1,
+ ICP_NEIGH_REL,
+};
+
+struct icp_qat_uclo_page {
+ struct icp_qat_uclo_encap_page *encap_page;
+ struct icp_qat_uclo_region *region;
+ unsigned int flags;
+};
+
+struct icp_qat_uclo_region {
+ struct icp_qat_uclo_page *loaded;
+ struct icp_qat_uclo_page *page;
+};
+
+struct icp_qat_uclo_aeslice {
+ struct icp_qat_uclo_region *region;
+ struct icp_qat_uclo_page *page;
+ struct icp_qat_uclo_page *cur_page[ICP_QAT_UCLO_MAX_CTX];
+ struct icp_qat_uclo_encapme *encap_image;
+ unsigned int ctx_mask_assigned;
+ unsigned int new_uaddr[ICP_QAT_UCLO_MAX_CTX];
+};
+
+struct icp_qat_uclo_aedata {
+ unsigned int slice_num;
+ unsigned int eff_ustore_size;
+ struct icp_qat_uclo_aeslice ae_slices[ICP_QAT_UCLO_MAX_CTX];
+};
+
+struct icp_qat_uof_encap_obj {
+ char *beg_uof;
+ struct icp_qat_uof_objhdr *obj_hdr;
+ struct icp_qat_uof_chunkhdr *chunk_hdr;
+ struct icp_qat_uof_varmem_seg *var_mem_seg;
+};
+
+struct icp_qat_uclo_encap_uwblock {
+ unsigned int start_addr;
+ unsigned int words_num;
+ uint64_t micro_words;
+};
+
+struct icp_qat_uclo_encap_page {
+ unsigned int def_page;
+ unsigned int page_region;
+ unsigned int beg_addr_v;
+ unsigned int beg_addr_p;
+ unsigned int micro_words_num;
+ unsigned int uwblock_num;
+ struct icp_qat_uclo_encap_uwblock *uwblock;
+};
+
+struct icp_qat_uclo_encapme {
+ struct icp_qat_uof_image *img_ptr;
+ struct icp_qat_uclo_encap_page *page;
+ unsigned int ae_reg_num;
+ struct icp_qat_uof_ae_reg *ae_reg;
+ unsigned int init_regsym_num;
+ struct icp_qat_uof_init_regsym *init_regsym;
+ unsigned int sbreak_num;
+ struct icp_qat_uof_sbreak *sbreak;
+ unsigned int uwords_num;
+};
+
+struct icp_qat_uclo_init_mem_table {
+ unsigned int entry_num;
+ struct icp_qat_uof_initmem *init_mem;
+};
+
+struct icp_qat_uclo_objhdr {
+ char *file_buff;
+ unsigned int checksum;
+ unsigned int size;
+};
+
+struct icp_qat_uof_strtable {
+ unsigned int table_len;
+ unsigned int reserved;
+ uint64_t strings;
+};
+
+struct icp_qat_uclo_objhandle {
+ unsigned int prod_type;
+ unsigned int prod_rev;
+ struct icp_qat_uclo_objhdr *obj_hdr;
+ struct icp_qat_uof_encap_obj encap_uof_obj;
+ struct icp_qat_uof_strtable str_table;
+ struct icp_qat_uclo_encapme ae_uimage[ICP_QAT_UCLO_MAX_UIMAGE];
+ struct icp_qat_uclo_aedata ae_data[ICP_QAT_UCLO_MAX_AE];
+ struct icp_qat_uclo_init_mem_table init_mem_tab;
+ struct icp_qat_uof_batch_init *lm_init_tab[ICP_QAT_UCLO_MAX_AE];
+ struct icp_qat_uof_batch_init *umem_init_tab[ICP_QAT_UCLO_MAX_AE];
+ int uimage_num;
+ int uword_in_bytes;
+ int global_inited;
+ unsigned int ae_num;
+ unsigned int ustore_phy_size;
+ void *obj_buf;
+ uint64_t *uword_buf;
+};
+
+struct icp_qat_uof_uword_block {
+ unsigned int start_addr;
+ unsigned int words_num;
+ unsigned int uword_offset;
+ unsigned int reserved;
+};
+
+struct icp_qat_uof_filehdr {
+ unsigned short file_id;
+ unsigned short reserved1;
+ char min_ver;
+ char maj_ver;
+ unsigned short reserved2;
+ unsigned short max_chunks;
+ unsigned short num_chunks;
+};
+
+struct icp_qat_uof_filechunkhdr {
+ char chunk_id[ICP_QAT_UOF_OBJID_LEN];
+ unsigned int checksum;
+ unsigned int offset;
+ unsigned int size;
+};
+
+struct icp_qat_uof_objhdr {
+ unsigned int cpu_type;
+ unsigned short min_cpu_ver;
+ unsigned short max_cpu_ver;
+ short max_chunks;
+ short num_chunks;
+ unsigned int reserved1;
+ unsigned int reserved2;
+};
+
+struct icp_qat_uof_chunkhdr {
+ char chunk_id[ICP_QAT_UOF_OBJID_LEN];
+ unsigned int offset;
+ unsigned int size;
+};
+
+struct icp_qat_uof_memvar_attr {
+ unsigned int offset_in_byte;
+ unsigned int value;
+};
+
+struct icp_qat_uof_initmem {
+ unsigned int sym_name;
+ char region;
+ char scope;
+ unsigned short reserved1;
+ unsigned int addr;
+ unsigned int num_in_bytes;
+ unsigned int val_attr_num;
+};
+
+struct icp_qat_uof_init_regsym {
+ unsigned int sym_name;
+ char init_type;
+ char value_type;
+ char reg_type;
+ unsigned char ctx;
+ unsigned int reg_addr;
+ unsigned int value;
+};
+
+struct icp_qat_uof_varmem_seg {
+ unsigned int sram_base;
+ unsigned int sram_size;
+ unsigned int sram_alignment;
+ unsigned int sdram_base;
+ unsigned int sdram_size;
+ unsigned int sdram_alignment;
+ unsigned int sdram1_base;
+ unsigned int sdram1_size;
+ unsigned int sdram1_alignment;
+ unsigned int scratch_base;
+ unsigned int scratch_size;
+ unsigned int scratch_alignment;
+};
+
+struct icp_qat_uof_gtid {
+ char tool_id[ICP_QAT_UOF_OBJID_LEN];
+ int tool_ver;
+ unsigned int reserved1;
+ unsigned int reserved2;
+};
+
+struct icp_qat_uof_sbreak {
+ unsigned int page_num;
+ unsigned int virt_uaddr;
+ unsigned char sbreak_type;
+ unsigned char reg_type;
+ unsigned short reserved1;
+ unsigned int addr_offset;
+ unsigned int reg_addr;
+};
+
+struct icp_qat_uof_code_page {
+ unsigned int page_region;
+ unsigned int page_num;
+ unsigned char def_page;
+ unsigned char reserved2;
+ unsigned short reserved1;
+ unsigned int beg_addr_v;
+ unsigned int beg_addr_p;
+ unsigned int neigh_reg_tab_offset;
+ unsigned int uc_var_tab_offset;
+ unsigned int imp_var_tab_offset;
+ unsigned int imp_expr_tab_offset;
+ unsigned int code_area_offset;
+};
+
+struct icp_qat_uof_image {
+ unsigned int img_name;
+ unsigned int ae_assigned;
+ unsigned int ctx_assigned;
+ unsigned int cpu_type;
+ unsigned int entry_address;
+ unsigned int fill_pattern[2];
+ unsigned int reloadable_size;
+ unsigned char sensitivity;
+ unsigned char reserved;
+ unsigned short ae_mode;
+ unsigned short max_ver;
+ unsigned short min_ver;
+ unsigned short image_attrib;
+ unsigned short reserved2;
+ unsigned short page_region_num;
+ unsigned short numpages;
+ unsigned int reg_tab_offset;
+ unsigned int init_reg_sym_tab;
+ unsigned int sbreak_tab;
+ unsigned int app_metadata;
+};
+
+struct icp_qat_uof_objtable {
+ unsigned int entry_num;
+};
+
+struct icp_qat_uof_ae_reg {
+ unsigned int name;
+ unsigned int vis_name;
+ unsigned short type;
+ unsigned short addr;
+ unsigned short access_mode;
+ unsigned char visible;
+ unsigned char reserved1;
+ unsigned short ref_count;
+ unsigned short reserved2;
+ unsigned int xo_id;
+};
+
+struct icp_qat_uof_code_area {
+ unsigned int micro_words_num;
+ unsigned int uword_block_tab;
+};
+
+struct icp_qat_uof_batch_init {
+ unsigned int ae;
+ unsigned int addr;
+ unsigned int *value;
+ unsigned int size;
+ struct icp_qat_uof_batch_init *next;
+};
+#endif
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
new file mode 100644
index 000000000000..59df48872955
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -0,0 +1,1038 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/crypto.h>
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/algapi.h>
+#include <crypto/authenc.h>
+#include <crypto/rng.h>
+#include <linux/dma-mapping.h>
+#include "adf_accel_devices.h"
+#include "adf_transport.h"
+#include "adf_common_drv.h"
+#include "qat_crypto.h"
+#include "icp_qat_hw.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+
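+/*
+ * Content descriptor cipher config for AES-CBC: encrypt sessions use the
+ * key as supplied (no conversion), while decrypt sessions ask the hardware
+ * to convert the key so it can derive the decryption key schedule.
+ */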
+#define QAT_AES_HW_CONFIG_ENC(alg) \
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
+ ICP_QAT_HW_CIPHER_NO_CONVERT, \
+ ICP_QAT_HW_CIPHER_ENCRYPT)
+
+#define QAT_AES_HW_CONFIG_DEC(alg) \
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
+ ICP_QAT_HW_CIPHER_KEY_CONVERT, \
+ ICP_QAT_HW_CIPHER_DECRYPT)
+
+static atomic_t active_dev;
+
+struct qat_alg_buf {
+ uint32_t len;
+ uint32_t resrvd;
+ uint64_t addr;
+} __packed;
+
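+/*
+ * Flat buffer list handed to the firmware; __packed and 64-byte aligned
+ * so the layout matches what the hardware expects.
+ */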
+struct qat_alg_buf_list {
+ uint64_t resrvd;
+ uint32_t num_bufs;
+ uint32_t num_mapped_bufs;
+ struct qat_alg_buf bufers[];
+} __packed __aligned(64);
+
+/* Common content descriptor */
+struct qat_alg_cd {
+ union {
+ struct qat_enc { /* Encrypt content desc */
+ struct icp_qat_hw_cipher_algo_blk cipher;
+ struct icp_qat_hw_auth_algo_blk hash;
+ } qat_enc_cd;
+		struct qat_dec { /* Decrypt content desc */
+ struct icp_qat_hw_auth_algo_blk hash;
+ struct icp_qat_hw_cipher_algo_blk cipher;
+ } qat_dec_cd;
+ };
+} __aligned(64);
+
+#define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk)
+
+struct qat_auth_state {
+ uint8_t data[MAX_AUTH_STATE_SIZE];
+} __aligned(64);
+
+struct qat_alg_session_ctx {
+ struct qat_alg_cd *enc_cd;
+ dma_addr_t enc_cd_paddr;
+ struct qat_alg_cd *dec_cd;
+ dma_addr_t dec_cd_paddr;
+ struct qat_auth_state *auth_hw_state_enc;
+ dma_addr_t auth_state_enc_paddr;
+ struct qat_auth_state *auth_hw_state_dec;
+ dma_addr_t auth_state_dec_paddr;
+ struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl;
+ struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl;
+ struct qat_crypto_instance *inst;
+ struct crypto_tfm *tfm;
+ struct crypto_shash *hash_tfm;
+ enum icp_qat_hw_auth_algo qat_hash_alg;
+ uint8_t salt[AES_BLOCK_SIZE];
+ spinlock_t lock; /* protects qat_alg_session_ctx struct */
+};
+
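+/*
+ * Return the physical package of the current CPU so an accelerator
+ * instance close to the caller can be picked at setkey time.
+ */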
+static int get_current_node(void)
+{
+ return cpu_data(current_thread_info()->cpu).phys_proc_id;
+}
+
+static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
+{
+ switch (qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ return ICP_QAT_HW_SHA1_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ return ICP_QAT_HW_SHA256_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ return ICP_QAT_HW_SHA512_STATE1_SZ;
+ default:
+ return -EFAULT;
+	}
+ return -EFAULT;
+}
+
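+/*
+ * Precompute the HMAC inner and outer partial digests: hash one block of
+ * the key XORed with ipad/opad, then export the intermediate hash state
+ * into the content descriptor so the hardware can resume the HMAC without
+ * ever seeing the key.
+ */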
+static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
+ struct qat_alg_session_ctx *ctx,
+ const uint8_t *auth_key,
+ unsigned int auth_keylen, uint8_t *auth_state)
+{
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(ctx->hash_tfm)];
+ } desc;
+ struct sha1_state sha1;
+ struct sha256_state sha256;
+ struct sha512_state sha512;
+ int block_size = crypto_shash_blocksize(ctx->hash_tfm);
+ int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
+ uint8_t *ipad = auth_state;
+ uint8_t *opad = ipad + block_size;
+ __be32 *hash_state_out;
+ __be64 *hash512_state_out;
+ int i, offset;
+
+ desc.shash.tfm = ctx->hash_tfm;
+ desc.shash.flags = 0x0;
+
+ if (auth_keylen > block_size) {
+ char buff[SHA512_BLOCK_SIZE];
+ int ret = crypto_shash_digest(&desc.shash, auth_key,
+ auth_keylen, buff);
+ if (ret)
+ return ret;
+
+ memcpy(ipad, buff, digest_size);
+ memcpy(opad, buff, digest_size);
+ memset(ipad + digest_size, 0, block_size - digest_size);
+ memset(opad + digest_size, 0, block_size - digest_size);
+ } else {
+ memcpy(ipad, auth_key, auth_keylen);
+ memcpy(opad, auth_key, auth_keylen);
+ memset(ipad + auth_keylen, 0, block_size - auth_keylen);
+ memset(opad + auth_keylen, 0, block_size - auth_keylen);
+ }
+
+ for (i = 0; i < block_size; i++) {
+ char *ipad_ptr = ipad + i;
+ char *opad_ptr = opad + i;
+ *ipad_ptr ^= 0x36;
+ *opad_ptr ^= 0x5C;
+ }
+
+ if (crypto_shash_init(&desc.shash))
+ return -EFAULT;
+
+ if (crypto_shash_update(&desc.shash, ipad, block_size))
+ return -EFAULT;
+
+ hash_state_out = (__be32 *)hash->sha.state1;
+ hash512_state_out = (__be64 *)hash_state_out;
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ if (crypto_shash_export(&desc.shash, &sha1))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+ *hash_state_out = cpu_to_be32(*(sha1.state + i));
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ if (crypto_shash_export(&desc.shash, &sha256))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+ *hash_state_out = cpu_to_be32(*(sha256.state + i));
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ if (crypto_shash_export(&desc.shash, &sha512))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
+ *hash512_state_out = cpu_to_be64(*(sha512.state + i));
+ break;
+ default:
+ return -EFAULT;
+ }
+
+ if (crypto_shash_init(&desc.shash))
+ return -EFAULT;
+
+ if (crypto_shash_update(&desc.shash, opad, block_size))
+ return -EFAULT;
+
+ offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
+ hash_state_out = (__be32 *)(hash->sha.state1 + offset);
+ hash512_state_out = (__be64 *)hash_state_out;
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ if (crypto_shash_export(&desc.shash, &sha1))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+ *hash_state_out = cpu_to_be32(*(sha1.state + i));
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ if (crypto_shash_export(&desc.shash, &sha256))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+ *hash_state_out = cpu_to_be32(*(sha256.state + i));
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ if (crypto_shash_export(&desc.shash, &sha512))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
+ *hash512_state_out = cpu_to_be64(*(sha512.state + i));
+ break;
+ default:
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
+{
+ header->hdr_flags =
+ ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+ header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
+ header->comn_req_flags =
+ ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
+ QAT_COMN_PTR_TYPE_SGL);
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+ ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_PARTIAL_NONE);
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
+ ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_PROTO);
+ ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_UPDATE_STATE);
+}
+
+static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
+ int alg, struct crypto_authenc_keys *keys)
+{
+ struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
+ unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
+ struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
+ struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
+ struct icp_qat_hw_auth_algo_blk *hash =
+ (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
+ sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
+ struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req_tmpl;
+ struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ void *ptr = &req_tmpl->cd_ctrl;
+ struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+ struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+ struct icp_qat_fw_la_auth_req_params *auth_param =
+ (struct icp_qat_fw_la_auth_req_params *)
+ ((char *)&req_tmpl->serv_specif_rqpars +
+ sizeof(struct icp_qat_fw_la_cipher_req_params));
+
+ /* CD setup */
+ cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg);
+ memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
+ hash->sha.inner_setup.auth_config.config =
+ ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+ ctx->qat_hash_alg, digestsize);
+ hash->sha.inner_setup.auth_counter.counter =
+ cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
+
+ if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen,
+ (uint8_t *)ctx->auth_hw_state_enc))
+ return -EFAULT;
+
+ /* Request setup */
+ qat_alg_init_common_hdr(header);
+ header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_RET_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+ cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
+ cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
+
+ /* Cipher CD config setup */
+ cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
+ cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
+ cipher_cd_ctrl->cipher_cfg_offset = 0;
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+ /* Auth CD config setup */
+ hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
+ hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
+ hash_cd_ctrl->inner_res_sz = digestsize;
+ hash_cd_ctrl->final_sz = digestsize;
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ hash_cd_ctrl->inner_state1_sz =
+ round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
+ hash_cd_ctrl->inner_state2_sz =
+ round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
+ hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
+ hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
+ break;
+ default:
+ break;
+ }
+ hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
+ ((sizeof(struct icp_qat_hw_auth_setup) +
+ round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
+ auth_param->u1.auth_partial_st_prefix = ctx->auth_state_enc_paddr +
+ sizeof(struct icp_qat_hw_auth_counter) +
+ round_up(hash_cd_ctrl->inner_state1_sz, 8);
+ ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+ return 0;
+}
+
+static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
+ int alg, struct crypto_authenc_keys *keys)
+{
+ struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
+ unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
+ struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
+ struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
+ struct icp_qat_hw_cipher_algo_blk *cipher =
+ (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
+ sizeof(struct icp_qat_hw_auth_setup) +
+ roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
+ struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req_tmpl;
+ struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ void *ptr = &req_tmpl->cd_ctrl;
+ struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+ struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+ struct icp_qat_fw_la_auth_req_params *auth_param =
+ (struct icp_qat_fw_la_auth_req_params *)
+ ((char *)&req_tmpl->serv_specif_rqpars +
+ sizeof(struct icp_qat_fw_la_cipher_req_params));
+
+ /* CD setup */
+ cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg);
+ memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
+ hash->sha.inner_setup.auth_config.config =
+ ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+ ctx->qat_hash_alg,
+ digestsize);
+ hash->sha.inner_setup.auth_counter.counter =
+ cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
+
+ if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen,
+ (uint8_t *)ctx->auth_hw_state_dec))
+ return -EFAULT;
+
+ /* Request setup */
+ qat_alg_init_common_hdr(header);
+ header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_CMP_AUTH_RES);
+ cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
+ cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
+
+ /* Cipher CD config setup */
+ cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
+ cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
+ cipher_cd_ctrl->cipher_cfg_offset =
+ (sizeof(struct icp_qat_hw_auth_setup) +
+ roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+
+ /* Auth CD config setup */
+ hash_cd_ctrl->hash_cfg_offset = 0;
+ hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
+ hash_cd_ctrl->inner_res_sz = digestsize;
+ hash_cd_ctrl->final_sz = digestsize;
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ hash_cd_ctrl->inner_state1_sz =
+ round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
+ hash_cd_ctrl->inner_state2_sz =
+ round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
+ hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
+ hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
+ break;
+ default:
+ break;
+ }
+
+ hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
+ ((sizeof(struct icp_qat_hw_auth_setup) +
+ round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
+ auth_param->u1.auth_partial_st_prefix = ctx->auth_state_enc_paddr +
+ sizeof(struct icp_qat_hw_auth_counter) +
+ round_up(hash_cd_ctrl->inner_state1_sz, 8);
+ auth_param->auth_res_sz = digestsize;
+ ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+ return 0;
+}
+
+static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx,
+ const uint8_t *key, unsigned int keylen)
+{
+ struct crypto_authenc_keys keys;
+ int alg;
+
+ if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
+ return -EFAULT;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen))
+ goto bad_key;
+
+ switch (keys.enckeylen) {
+ case AES_KEYSIZE_128:
+ alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+ break;
+ case AES_KEYSIZE_192:
+ alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
+ break;
+ case AES_KEYSIZE_256:
+ alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
+ break;
+ default:
+ goto bad_key;
+ }
+
+ if (qat_alg_init_enc_session(ctx, alg, &keys))
+ goto error;
+
+ if (qat_alg_init_dec_session(ctx, alg, &keys))
+ goto error;
+
+ return 0;
+bad_key:
+ crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+error:
+ return -EFAULT;
+}
+
+static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
+ unsigned int keylen)
+{
+ struct qat_alg_session_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev;
+
+ spin_lock(&ctx->lock);
+ if (ctx->enc_cd) {
+ /* rekeying */
+ dev = &GET_DEV(ctx->inst->accel_dev);
+ memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
+ memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
+ memset(ctx->auth_hw_state_enc, 0,
+ sizeof(struct qat_auth_state));
+ memset(ctx->auth_hw_state_dec, 0,
+ sizeof(struct qat_auth_state));
+ memset(&ctx->enc_fw_req_tmpl, 0,
+ sizeof(struct icp_qat_fw_la_bulk_req));
+ memset(&ctx->dec_fw_req_tmpl, 0,
+ sizeof(struct icp_qat_fw_la_bulk_req));
+ } else {
+ /* new key */
+ int node = get_current_node();
+ struct qat_crypto_instance *inst =
+ qat_crypto_get_instance_node(node);
+ if (!inst) {
+ spin_unlock(&ctx->lock);
+ return -EINVAL;
+ }
+
+ dev = &GET_DEV(inst->accel_dev);
+ ctx->inst = inst;
+ ctx->enc_cd = dma_zalloc_coherent(dev,
+ sizeof(struct qat_alg_cd),
+ &ctx->enc_cd_paddr,
+ GFP_ATOMIC);
+ if (!ctx->enc_cd) {
+ spin_unlock(&ctx->lock);
+ return -ENOMEM;
+ }
+ ctx->dec_cd = dma_zalloc_coherent(dev,
+ sizeof(struct qat_alg_cd),
+ &ctx->dec_cd_paddr,
+ GFP_ATOMIC);
+ if (!ctx->dec_cd) {
+ spin_unlock(&ctx->lock);
+ goto out_free_enc;
+ }
+ ctx->auth_hw_state_enc =
+ dma_zalloc_coherent(dev, sizeof(struct qat_auth_state),
+ &ctx->auth_state_enc_paddr,
+ GFP_ATOMIC);
+ if (!ctx->auth_hw_state_enc) {
+ spin_unlock(&ctx->lock);
+ goto out_free_dec;
+ }
+ ctx->auth_hw_state_dec =
+ dma_zalloc_coherent(dev, sizeof(struct qat_auth_state),
+ &ctx->auth_state_dec_paddr,
+ GFP_ATOMIC);
+ if (!ctx->auth_hw_state_dec) {
+ spin_unlock(&ctx->lock);
+ goto out_free_auth_enc;
+ }
+ }
+ spin_unlock(&ctx->lock);
+ if (qat_alg_init_sessions(ctx, key, keylen))
+ goto out_free_all;
+
+ return 0;
+
+out_free_all:
+ dma_free_coherent(dev, sizeof(struct qat_auth_state),
+ ctx->auth_hw_state_dec, ctx->auth_state_dec_paddr);
+ ctx->auth_hw_state_dec = NULL;
+out_free_auth_enc:
+ dma_free_coherent(dev, sizeof(struct qat_auth_state),
+ ctx->auth_hw_state_enc, ctx->auth_state_enc_paddr);
+ ctx->auth_hw_state_enc = NULL;
+out_free_dec:
+ dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+ ctx->dec_cd, ctx->dec_cd_paddr);
+ ctx->dec_cd = NULL;
+out_free_enc:
+ dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+ ctx->enc_cd, ctx->enc_cd_paddr);
+ ctx->enc_cd = NULL;
+ return -ENOMEM;
+}
+
+static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
+ struct qat_crypto_request *qat_req)
+{
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ struct qat_alg_buf_list *bl = qat_req->buf.bl;
+ struct qat_alg_buf_list *blout = qat_req->buf.blout;
+ dma_addr_t blp = qat_req->buf.blp;
+ dma_addr_t blpout = qat_req->buf.bloutp;
+ size_t sz = qat_req->buf.sz;
+ int i, bufs = bl->num_bufs;
+
+ for (i = 0; i < bl->num_bufs; i++)
+ dma_unmap_single(dev, bl->bufers[i].addr,
+ bl->bufers[i].len, DMA_BIDIRECTIONAL);
+
+ dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+ kfree(bl);
+ if (blp != blpout) {
+		/* For an out-of-place operation, DMA unmap only the data buffers */
+ int bufless = bufs - blout->num_mapped_bufs;
+
+ for (i = bufless; i < bufs; i++) {
+ dma_unmap_single(dev, blout->bufers[i].addr,
+ blout->bufers[i].len,
+ DMA_BIDIRECTIONAL);
+ }
+ dma_unmap_single(dev, blpout, sz, DMA_TO_DEVICE);
+ kfree(blout);
+ }
+}
+
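+/*
+ * Translate the assoc, IV and data scatterlists into the flat buffer list
+ * format the firmware expects: every entry is DMA mapped individually and
+ * the list itself is mapped as one block referenced by the request's
+ * src/dest data address.
+ */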
+static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
+ struct scatterlist *assoc,
+ struct scatterlist *sgl,
+ struct scatterlist *sglout, uint8_t *iv,
+ uint8_t ivlen,
+ struct qat_crypto_request *qat_req)
+{
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ int i, bufs = 0, n = sg_nents(sgl), assoc_n = sg_nents(assoc);
+ struct qat_alg_buf_list *bufl;
+ struct qat_alg_buf_list *buflout = NULL;
+ dma_addr_t blp;
+ dma_addr_t bloutp = 0;
+ struct scatterlist *sg;
+ size_t sz = sizeof(struct qat_alg_buf_list) +
+ ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
+
+ if (unlikely(!n))
+ return -EINVAL;
+
+ bufl = kmalloc_node(sz, GFP_ATOMIC, inst->accel_dev->numa_node);
+ if (unlikely(!bufl))
+ return -ENOMEM;
+
+ blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, blp)))
+ goto err;
+
+ for_each_sg(assoc, sg, assoc_n, i) {
+ bufl->bufers[bufs].addr = dma_map_single(dev,
+ sg_virt(sg),
+ sg->length,
+ DMA_BIDIRECTIONAL);
+ bufl->bufers[bufs].len = sg->length;
+ if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
+ goto err;
+ bufs++;
+ }
+ bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
+ DMA_BIDIRECTIONAL);
+ bufl->bufers[bufs].len = ivlen;
+ if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
+ goto err;
+ bufs++;
+
+ for_each_sg(sgl, sg, n, i) {
+ int y = i + bufs;
+
+ bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
+ sg->length,
+ DMA_BIDIRECTIONAL);
+ bufl->bufers[y].len = sg->length;
+ if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
+ goto err;
+ }
+ bufl->num_bufs = n + bufs;
+ qat_req->buf.bl = bufl;
+ qat_req->buf.blp = blp;
+ qat_req->buf.sz = sz;
+ /* Handle out of place operation */
+ if (sgl != sglout) {
+ struct qat_alg_buf *bufers;
+
+ buflout = kmalloc_node(sz, GFP_ATOMIC,
+ inst->accel_dev->numa_node);
+ if (unlikely(!buflout))
+ goto err;
+ bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, bloutp)))
+ goto err;
+ bufers = buflout->bufers;
+		/*
+		 * For an out-of-place operation, DMA map only the data and
+		 * reuse the assoc and IV mappings.
+		 */
+ for (i = 0; i < bufs; i++) {
+ bufers[i].len = bufl->bufers[i].len;
+ bufers[i].addr = bufl->bufers[i].addr;
+ }
+ for_each_sg(sglout, sg, n, i) {
+ int y = i + bufs;
+
+ bufers[y].addr = dma_map_single(dev, sg_virt(sg),
+ sg->length,
+ DMA_BIDIRECTIONAL);
+ buflout->bufers[y].len = sg->length;
+ if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
+ goto err;
+ }
+ buflout->num_bufs = n + bufs;
+ buflout->num_mapped_bufs = n;
+ qat_req->buf.blout = buflout;
+ qat_req->buf.bloutp = bloutp;
+ } else {
+ /* Otherwise set the src and dst to the same address */
+ qat_req->buf.bloutp = qat_req->buf.blp;
+ }
+ return 0;
+err:
+ dev_err(dev, "Failed to map buf for dma\n");
+ for_each_sg(sgl, sg, n + bufs, i) {
+ if (!dma_mapping_error(dev, bufl->bufers[i].addr)) {
+ dma_unmap_single(dev, bufl->bufers[i].addr,
+ bufl->bufers[i].len,
+ DMA_BIDIRECTIONAL);
+ }
+ }
+ if (!dma_mapping_error(dev, blp))
+ dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+ kfree(bufl);
+ if (sgl != sglout && buflout) {
+ for_each_sg(sglout, sg, n, i) {
+ int y = i + bufs;
+
+ if (!dma_mapping_error(dev, buflout->bufers[y].addr))
+ dma_unmap_single(dev, buflout->bufers[y].addr,
+ buflout->bufers[y].len,
+ DMA_BIDIRECTIONAL);
+ }
+ if (!dma_mapping_error(dev, bloutp))
+ dma_unmap_single(dev, bloutp, sz, DMA_TO_DEVICE);
+ kfree(buflout);
+ }
+ return -ENOMEM;
+}
+
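+/*
+ * Completion handler for firmware responses: unmap the request buffers and
+ * complete the aead request, returning -EBADMSG if the device reported a
+ * crypto/authentication error.
+ */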
+void qat_alg_callback(void *resp)
+{
+ struct icp_qat_fw_la_resp *qat_resp = resp;
+ struct qat_crypto_request *qat_req =
+ (void *)(__force long)qat_resp->opaque_data;
+ struct qat_alg_session_ctx *ctx = qat_req->ctx;
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct aead_request *areq = qat_req->areq;
+	uint8_t stat_field = qat_resp->comn_resp.comn_status;
+	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
+
+ qat_alg_free_bufl(inst, qat_req);
+ if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
+ res = -EBADMSG;
+ areq->base.complete(&areq->base, res);
+}
+
+static int qat_alg_dec(struct aead_request *areq)
+{
+ struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
+ struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_crypto_request *qat_req = aead_request_ctx(areq);
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ struct icp_qat_fw_la_bulk_req *msg;
+ int digst_size = crypto_aead_crt(aead_tfm)->authsize;
+ int ret, ctr = 0;
+
+ ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
+ areq->iv, AES_BLOCK_SIZE, qat_req);
+ if (unlikely(ret))
+ return ret;
+
+ msg = &qat_req->req;
+ *msg = ctx->dec_fw_req_tmpl;
+ qat_req->ctx = ctx;
+ qat_req->areq = areq;
+ qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+ qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+ qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+ cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+ cipher_param->cipher_length = areq->cryptlen - digst_size;
+ cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
+ memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
+ auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+ auth_param->auth_off = 0;
+ auth_param->auth_len = areq->assoclen +
+ cipher_param->cipher_length + AES_BLOCK_SIZE;
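+	/* The ring may be full; retry a bounded number of times before returning busy */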
+ do {
+ ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+ } while (ret == -EAGAIN && ctr++ < 10);
+
+ if (ret == -EAGAIN) {
+ qat_alg_free_bufl(ctx->inst, qat_req);
+ return -EBUSY;
+ }
+ return -EINPROGRESS;
+}
+
+static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv,
+ int enc_iv)
+{
+ struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
+ struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_crypto_request *qat_req = aead_request_ctx(areq);
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ struct icp_qat_fw_la_bulk_req *msg;
+ int ret, ctr = 0;
+
+ ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
+ iv, AES_BLOCK_SIZE, qat_req);
+ if (unlikely(ret))
+ return ret;
+
+ msg = &qat_req->req;
+ *msg = ctx->enc_fw_req_tmpl;
+ qat_req->ctx = ctx;
+ qat_req->areq = areq;
+ qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+ qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+ qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+ cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+
+ if (enc_iv) {
+ cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
+ cipher_param->cipher_offset = areq->assoclen;
+ } else {
+ memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
+ cipher_param->cipher_length = areq->cryptlen;
+ cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
+ }
+ auth_param->auth_off = 0;
+ auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;
+
+ do {
+ ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+ } while (ret == -EAGAIN && ctr++ < 10);
+
+ if (ret == -EAGAIN) {
+ qat_alg_free_bufl(ctx->inst, qat_req);
+ return -EBUSY;
+ }
+ return -EINPROGRESS;
+}
+
+static int qat_alg_enc(struct aead_request *areq)
+{
+ return qat_alg_enc_internal(areq, areq->iv, 0);
+}
+
+static int qat_alg_genivenc(struct aead_givcrypt_request *req)
+{
+ struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
+ struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+ __be64 seq;
+
+ memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
+ seq = cpu_to_be64(req->seq);
+ memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
+ &seq, sizeof(uint64_t));
+ return qat_alg_enc_internal(&req->areq, req->giv, 1);
+}
+
+static int qat_alg_init(struct crypto_tfm *tfm,
+ enum icp_qat_hw_auth_algo hash, const char *hash_name)
+{
+ struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ memset(ctx, '\0', sizeof(*ctx));
+ ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+ if (IS_ERR(ctx->hash_tfm))
+ return -EFAULT;
+ spin_lock_init(&ctx->lock);
+ ctx->qat_hash_alg = hash;
+ tfm->crt_aead.reqsize = sizeof(struct aead_request) +
+ sizeof(struct qat_crypto_request);
+ ctx->tfm = tfm;
+ return 0;
+}
+
+static int qat_alg_sha1_init(struct crypto_tfm *tfm)
+{
+ return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
+}
+
+static int qat_alg_sha256_init(struct crypto_tfm *tfm)
+{
+ return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
+}
+
+static int qat_alg_sha512_init(struct crypto_tfm *tfm)
+{
+ return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
+}
+
+static void qat_alg_exit(struct crypto_tfm *tfm)
+{
+ struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev;
+
+ if (!IS_ERR(ctx->hash_tfm))
+ crypto_free_shash(ctx->hash_tfm);
+
+ if (!inst)
+ return;
+
+ dev = &GET_DEV(inst->accel_dev);
+ if (ctx->enc_cd)
+ dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+ ctx->enc_cd, ctx->enc_cd_paddr);
+ if (ctx->dec_cd)
+ dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+ ctx->dec_cd, ctx->dec_cd_paddr);
+ if (ctx->auth_hw_state_enc)
+ dma_free_coherent(dev, sizeof(struct qat_auth_state),
+ ctx->auth_hw_state_enc,
+ ctx->auth_state_enc_paddr);
+
+ if (ctx->auth_hw_state_dec)
+ dma_free_coherent(dev, sizeof(struct qat_auth_state),
+ ctx->auth_hw_state_dec,
+ ctx->auth_state_dec_paddr);
+
+ qat_crypto_put_instance(inst);
+}
+
+static struct crypto_alg qat_algs[] = { {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "qat_aes_cbc_hmac_sha1",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = qat_alg_sha1_init,
+ .cra_exit = qat_alg_exit,
+ .cra_u = {
+ .aead = {
+ .setkey = qat_alg_setkey,
+ .decrypt = qat_alg_dec,
+ .encrypt = qat_alg_enc,
+ .givencrypt = qat_alg_genivenc,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ },
+}, {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "qat_aes_cbc_hmac_sha256",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = qat_alg_sha256_init,
+ .cra_exit = qat_alg_exit,
+ .cra_u = {
+ .aead = {
+ .setkey = qat_alg_setkey,
+ .decrypt = qat_alg_dec,
+ .encrypt = qat_alg_enc,
+ .givencrypt = qat_alg_genivenc,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ },
+}, {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name = "qat_aes_cbc_hmac_sha512",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = qat_alg_sha512_init,
+ .cra_exit = qat_alg_exit,
+ .cra_u = {
+ .aead = {
+ .setkey = qat_alg_setkey,
+ .decrypt = qat_alg_dec,
+ .encrypt = qat_alg_enc,
+ .givencrypt = qat_alg_genivenc,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ },
+} };
+
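+/*
+ * Algorithms are registered with the crypto API only when the first
+ * accelerator comes up; subsequent devices just bump the reference count
+ * and the last one to go away triggers unregistration.
+ */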
+int qat_algs_register(void)
+{
+ if (atomic_add_return(1, &active_dev) == 1) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
+ qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_AEAD |
+ CRYPTO_ALG_ASYNC;
+ return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ }
+ return 0;
+}
+
+int qat_algs_unregister(void)
+{
+ if (atomic_sub_return(1, &active_dev) == 0)
+ return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ return 0;
+}
+
+int qat_algs_init(void)
+{
+ atomic_set(&active_dev, 0);
+ crypto_get_default_rng();
+ return 0;
+}
+
+void qat_algs_exit(void)
+{
+ crypto_put_default_rng();
+}
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
new file mode 100644
index 000000000000..0d59bcb50de1
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_crypto.c
@@ -0,0 +1,284 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_transport.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "qat_crypto.h"
+#include "icp_qat_fw.h"
+
+#define SEC ADF_KERNEL_SEC
+
+static struct service_hndl qat_crypto;
+
+void qat_crypto_put_instance(struct qat_crypto_instance *inst)
+{
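+ /* Drop the instance refcount; release the accel device when the last user goes away */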
+ if (atomic_sub_return(1, &inst->refctr) == 0)
+ adf_dev_put(inst->accel_dev);
+}
+
+static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
+{
+ struct qat_crypto_instance *inst;
+ struct list_head *list_ptr, *tmp;
+ int i;
+
+ list_for_each_safe(list_ptr, tmp, &accel_dev->crypto_list) {
+ inst = list_entry(list_ptr, struct qat_crypto_instance, list);
+
+ for (i = 0; i < atomic_read(&inst->refctr); i++)
+ qat_crypto_put_instance(inst);
+
+ if (inst->sym_tx)
+ adf_remove_ring(inst->sym_tx);
+
+ if (inst->sym_rx)
+ adf_remove_ring(inst->sym_rx);
+
+ if (inst->pke_tx)
+ adf_remove_ring(inst->pke_tx);
+
+ if (inst->pke_rx)
+ adf_remove_ring(inst->pke_rx);
+
+ if (inst->rnd_tx)
+ adf_remove_ring(inst->rnd_tx);
+
+ if (inst->rnd_rx)
+ adf_remove_ring(inst->rnd_rx);
+
+ list_del(list_ptr);
+ kfree(inst);
+ }
+ return 0;
+}
+
+struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
+{
+ struct adf_accel_dev *accel_dev = NULL;
+ struct qat_crypto_instance *inst_best = NULL;
+ struct list_head *itr;
+ unsigned long best = ~0;
+
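+ /* Prefer a started device on the requested NUMA node, falling back to the first device */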
+ list_for_each(itr, adf_devmgr_get_head()) {
+ accel_dev = list_entry(itr, struct adf_accel_dev, list);
+ if (accel_dev->numa_node == node && adf_dev_started(accel_dev))
+ break;
+ accel_dev = NULL;
+ }
+ if (!accel_dev) {
+ pr_err("QAT: Could not find device on give node\n");
+ accel_dev = adf_devmgr_get_first();
+ }
+ if (!accel_dev || !adf_dev_started(accel_dev))
+ return NULL;
+
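+ /* Pick the crypto instance with the fewest current users on the chosen device */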
+ list_for_each(itr, &accel_dev->crypto_list) {
+ struct qat_crypto_instance *inst;
+ unsigned long cur;
+
+ inst = list_entry(itr, struct qat_crypto_instance, list);
+ cur = atomic_read(&inst->refctr);
+ if (best > cur) {
+ inst_best = inst;
+ best = cur;
+ }
+ }
+ if (inst_best) {
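+ /* The first user of an instance also takes a reference on the accel device */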
+ if (atomic_add_return(1, &inst_best->refctr) == 1) {
+ if (adf_dev_get(accel_dev)) {
+ atomic_dec(&inst_best->refctr);
+ pr_err("QAT: Could increment dev refctr\n");
+ return NULL;
+ }
+ }
+ }
+ return inst_best;
+}
+
+static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
+{
+ int i;
+ unsigned long bank;
+ unsigned long num_inst, num_msg_sym, num_msg_asym;
+ int msg_size;
+ struct qat_crypto_instance *inst;
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+
+ INIT_LIST_HEAD(&accel_dev->crypto_list);
+ strlcpy(key, ADF_NUM_CY, sizeof(key));
+
+ if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+ return -EFAULT;
+
+ if (kstrtoul(val, 0, &num_inst))
+ return -EFAULT;
+
+ for (i = 0; i < num_inst; i++) {
+ inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
+ accel_dev->numa_node);
+ if (!inst)
+ goto err;
+
+ list_add_tail(&inst->list, &accel_dev->crypto_list);
+ inst->id = i;
+ atomic_set(&inst->refctr, 0);
+ inst->accel_dev = accel_dev;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
+ if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+ goto err;
+
+ if (kstrtoul(val, 10, &bank))
+ goto err;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+ if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+ goto err;
+
+ if (kstrtoul(val, 10, &num_msg_sym))
+ goto err;
+ num_msg_sym = num_msg_sym >> 1;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+ if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+ goto err;
+
+ if (kstrtoul(val, 10, &num_msg_asym))
+ goto err;
+ num_msg_asym = num_msg_asym >> 1;
+
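+ /* Create the sym/rnd/pke tx rings, then the matching rx rings with the response callback */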
+ msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+ if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
+ msg_size, key, NULL, 0, &inst->sym_tx))
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i);
+ if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
+ msg_size, key, NULL, 0, &inst->rnd_tx))
+ goto err;
+
+ msg_size = msg_size >> 1;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+ if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
+ msg_size, key, NULL, 0, &inst->pke_tx))
+ goto err;
+
+ msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+ if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
+ msg_size, key, qat_alg_callback, 0,
+ &inst->sym_rx))
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i);
+ if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
+ msg_size, key, qat_alg_callback, 0,
+ &inst->rnd_rx))
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+ if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
+ msg_size, key, qat_alg_callback, 0,
+ &inst->pke_rx))
+ goto err;
+ }
+ return 0;
+err:
+ qat_crypto_free_instances(accel_dev);
+ return -ENOMEM;
+}
+
+static int qat_crypto_init(struct adf_accel_dev *accel_dev)
+{
+ if (qat_crypto_create_instances(accel_dev))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
+{
+ return qat_crypto_free_instances(accel_dev);
+}
+
+static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
+ enum adf_event event)
+{
+ int ret;
+
+ switch (event) {
+ case ADF_EVENT_INIT:
+ ret = qat_crypto_init(accel_dev);
+ break;
+ case ADF_EVENT_SHUTDOWN:
+ ret = qat_crypto_shutdown(accel_dev);
+ break;
+ case ADF_EVENT_RESTARTING:
+ case ADF_EVENT_RESTARTED:
+ case ADF_EVENT_START:
+ case ADF_EVENT_STOP:
+ default:
+ ret = 0;
+ }
+ return ret;
+}
+
+int qat_crypto_register(void)
+{
+ memset(&qat_crypto, 0, sizeof(qat_crypto));
+ qat_crypto.event_hld = qat_crypto_event_handler;
+ qat_crypto.name = "qat_crypto";
+ return adf_service_register(&qat_crypto);
+}
+
+int qat_crypto_unregister(void)
+{
+ return adf_service_unregister(&qat_crypto);
+}
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
new file mode 100644
index 000000000000..ab8468d11ddb
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_crypto.h
@@ -0,0 +1,83 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _QAT_CRYPTO_INSTANCE_H_
+#define _QAT_CRYPTO_INSTANCE_H_
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include "adf_accel_devices.h"
+#include "icp_qat_fw_la.h"
+
+struct qat_crypto_instance {
+ struct adf_etr_ring_data *sym_tx;
+ struct adf_etr_ring_data *sym_rx;
+ struct adf_etr_ring_data *pke_tx;
+ struct adf_etr_ring_data *pke_rx;
+ struct adf_etr_ring_data *rnd_tx;
+ struct adf_etr_ring_data *rnd_rx;
+ struct adf_accel_dev *accel_dev;
+ struct list_head list;
+ unsigned long state;
+ int id;
+ atomic_t refctr;
+};
+
+struct qat_crypto_request_buffs {
+ struct qat_alg_buf_list *bl;
+ dma_addr_t blp;
+ struct qat_alg_buf_list *blout;
+ dma_addr_t bloutp;
+ size_t sz;
+};
+
+struct qat_crypto_request {
+ struct icp_qat_fw_la_bulk_req req;
+ struct qat_alg_session_ctx *ctx;
+ struct aead_request *areq;
+ struct qat_crypto_request_buffs buf;
+};
+#endif
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
new file mode 100644
index 000000000000..9b8a31521ff3
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -0,0 +1,1393 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/slab.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_hal.h"
+#include "icp_qat_uclo.h"
+
+#define BAD_REGADDR 0xffff
+#define MAX_RETRY_TIMES 10000
+#define INIT_CTX_ARB_VALUE 0x0
+#define INIT_CTX_ENABLE_VALUE 0x0
+#define INIT_PC_VALUE 0x0
+#define INIT_WAKEUP_EVENTS_VALUE 0x1
+#define INIT_SIG_EVENTS_VALUE 0x1
+#define INIT_CCENABLE_VALUE 0x2000
+#define RST_CSR_QAT_LSB 20
+#define RST_CSR_AE_LSB 0
+#define MC_TIMESTAMP_ENABLE (0x1 << 7)
+
+#define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \
+ (~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \
+ (~(1 << CE_REG_PAR_ERR_BITPOS)))
+#define INSERT_IMMED_GPRA_CONST(inst, const_val) \
+ (inst = ((inst & 0xFFFF00C03FFull) | \
+ ((((const_val) << 12) & 0x0FF00000ull) | \
+ (((const_val) << 10) & 0x0003FC00ull))))
+#define INSERT_IMMED_GPRB_CONST(inst, const_val) \
+ (inst = ((inst & 0xFFFF00FFF00ull) | \
+ ((((const_val) << 12) & 0x0FF00000ull) | \
+ (((const_val) << 0) & 0x000000FFull))))
+
+#define AE(handle, ae) handle->hal_handle->aes[ae]
+
+static const uint64_t inst_4b[] = {
+ 0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull,
+ 0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
+ 0x0A021000000ull
+};
+
+static const uint64_t inst[] = {
+ 0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull,
+ 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
+ 0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull,
+ 0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
+ 0x0F000400300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D804400101ull,
+ 0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull,
+ 0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull,
+ 0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull,
+ 0x0A058CC0000ull, 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull,
+ 0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull,
+ 0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull,
+ 0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull,
+ 0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull,
+ 0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull,
+ 0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull,
+ 0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull,
+ 0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull,
+ 0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull,
+ 0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull,
+ 0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull,
+ 0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull,
+ 0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull,
+};
+
+void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int ctx_mask)
+{
+ AE(handle, ae).live_ctx_mask = ctx_mask;
+}
+
+#define CSR_RETRY_TIMES 500
+static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int csr,
+ unsigned int *value)
+{
+ unsigned int iterations = CSR_RETRY_TIMES;
+
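+ /* Retry the access until the LCS_STATUS bit in LOCAL_CSR_STATUS is clear */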
+ do {
+ *value = GET_AE_CSR(handle, ae, csr);
+ if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
+ return 0;
+ } while (iterations--);
+
+ pr_err("QAT: Read CSR timeout\n");
+ return -EFAULT;
+}
+
+static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int csr,
+ unsigned int value)
+{
+ unsigned int iterations = CSR_RETRY_TIMES;
+
+ do {
+ SET_AE_CSR(handle, ae, csr, value);
+ if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
+ return 0;
+ } while (iterations--);
+
+ pr_err("QAT: Write CSR Timeout\n");
+ return -EFAULT;
+}
+
+static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char ctx,
+ unsigned int *events)
+{
+ unsigned int cur_ctx;
+
+ qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+ qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events);
+ qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int cycles,
+ int chk_inactive)
+{
+ unsigned int base_cnt = 0, cur_cnt = 0;
+ unsigned int csr = (1 << ACS_ABO_BITPOS);
+ int times = MAX_RETRY_TIMES;
+ int elapsed_cycles = 0;
+
+ qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt);
+ base_cnt &= 0xffff;
+ while ((int)cycles > elapsed_cycles && times--) {
+ if (chk_inactive)
+ qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &csr);
+
+ qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt);
+ cur_cnt &= 0xffff;
+ elapsed_cycles = cur_cnt - base_cnt;
+
+ if (elapsed_cycles < 0)
+ elapsed_cycles += 0x10000;
+
+ /* ensure at least 8 cycles have elapsed in wait_cycles */
+ if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS)))
+ return 0;
+ }
+ if (!times) {
+ pr_err("QAT: wait_num_cycles time out\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+#define CLR_BIT(wrd, bit) (wrd & ~(1 << bit))
+#define SET_BIT(wrd, bit) (wrd | 1 << bit)
+
+int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char mode)
+{
+ unsigned int csr, new_csr;
+
+ if ((mode != 4) && (mode != 8)) {
+ pr_err("QAT: bad ctx mode=%d\n", mode);
+ return -EINVAL;
+ }
+
+ /* Set the acceleration engine context mode to either four or eight */
+ qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+ csr = IGNORE_W1C_MASK & csr;
+ new_csr = (mode == 4) ?
+ SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
+ CLR_BIT(csr, CE_INUSE_CONTEXTS_BITPOS);
+ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+ return 0;
+}
+
+int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char mode)
+{
+ unsigned int csr, new_csr;
+
+ qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+ csr &= IGNORE_W1C_MASK;
+
+ new_csr = (mode) ?
+ SET_BIT(csr, CE_NN_MODE_BITPOS) :
+ CLR_BIT(csr, CE_NN_MODE_BITPOS);
+
+ if (new_csr != csr)
+ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+
+ return 0;
+}
+
+int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, enum icp_qat_uof_regtype lm_type,
+ unsigned char mode)
+{
+ unsigned int csr, new_csr;
+
+ qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+ csr &= IGNORE_W1C_MASK;
+ switch (lm_type) {
+ case ICP_LMEM0:
+ new_csr = (mode) ?
+ SET_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS) :
+ CLR_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS);
+ break;
+ case ICP_LMEM1:
+ new_csr = (mode) ?
+ SET_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS) :
+ CLR_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS);
+ break;
+ default:
+ pr_err("QAT: lmType = 0x%x\n", lm_type);
+ return -EINVAL;
+ }
+
+ if (new_csr != csr)
+ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+ return 0;
+}
+
+static unsigned short qat_hal_get_reg_addr(unsigned int type,
+ unsigned short reg_num)
+{
+ unsigned short reg_addr;
+
+ switch (type) {
+ case ICP_GPA_ABS:
+ case ICP_GPB_ABS:
+ reg_addr = 0x80 | (reg_num & 0x7f);
+ break;
+ case ICP_GPA_REL:
+ case ICP_GPB_REL:
+ reg_addr = reg_num & 0x1f;
+ break;
+ case ICP_SR_RD_REL:
+ case ICP_SR_WR_REL:
+ case ICP_SR_REL:
+ reg_addr = 0x180 | (reg_num & 0x1f);
+ break;
+ case ICP_SR_ABS:
+ reg_addr = 0x140 | ((reg_num & 0x3) << 1);
+ break;
+ case ICP_DR_RD_REL:
+ case ICP_DR_WR_REL:
+ case ICP_DR_REL:
+ reg_addr = 0x1c0 | (reg_num & 0x1f);
+ break;
+ case ICP_DR_ABS:
+ reg_addr = 0x100 | ((reg_num & 0x3) << 1);
+ break;
+ case ICP_NEIGH_REL:
+ reg_addr = 0x280 | (reg_num & 0x1f);
+ break;
+ case ICP_LMEM0:
+ reg_addr = 0x200;
+ break;
+ case ICP_LMEM1:
+ reg_addr = 0x220;
+ break;
+ case ICP_NO_DEST:
+ reg_addr = 0x300 | (reg_num & 0xff);
+ break;
+ default:
+ reg_addr = BAD_REGADDR;
+ break;
+ }
+ return reg_addr;
+}
+
+void qat_hal_reset(struct icp_qat_fw_loader_handle *handle)
+{
+ unsigned int ae_reset_csr;
+
+ ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET);
+ ae_reset_csr |= handle->hal_handle->ae_mask << RST_CSR_AE_LSB;
+ ae_reset_csr |= handle->hal_handle->slice_mask << RST_CSR_QAT_LSB;
+ SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr);
+}
+
+static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int ctx_mask,
+ unsigned int ae_csr, unsigned int csr_val)
+{
+ unsigned int ctx, cur_ctx;
+
+ qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+
+ for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+ if (!(ctx_mask & (1 << ctx)))
+ continue;
+ qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+ qat_hal_wr_ae_csr(handle, ae, ae_csr, csr_val);
+ }
+
+ qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static void qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char ctx,
+ unsigned int ae_csr, unsigned int *csr_val)
+{
+ unsigned int cur_ctx;
+
+ qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+ qat_hal_rd_ae_csr(handle, ae, ae_csr, csr_val);
+ qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int ctx_mask,
+ unsigned int events)
+{
+ unsigned int ctx, cur_ctx;
+
+ qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+ if (!(ctx_mask & (1 << ctx)))
+ continue;
+ qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+ qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_INDIRECT, events);
+ }
+ qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int ctx_mask,
+ unsigned int events)
+{
+ unsigned int ctx, cur_ctx;
+
+ qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+ if (!(ctx_mask & (1 << ctx)))
+ continue;
+ qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+ qat_hal_wr_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT,
+ events);
+ }
+ qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
+{
+ unsigned int base_cnt, cur_cnt;
+ unsigned char ae;
+ unsigned int times = MAX_RETRY_TIMES;
+
+ for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+ if (!(handle->hal_handle->ae_mask & (1 << ae)))
+ continue;
+
+ qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT,
+ (unsigned int *)&base_cnt);
+ base_cnt &= 0xffff;
+
+ do {
+ qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT,
+ (unsigned int *)&cur_cnt);
+ cur_cnt &= 0xffff;
+ } while (times-- && (cur_cnt == base_cnt));
+
+ if (!times) {
+ pr_err("QAT: AE%d is inactive!!\n", ae);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
+{
+ unsigned int misc_ctl;
+ unsigned char ae;
+
+ /* stop the timestamp timers */
+ misc_ctl = GET_GLB_CSR(handle, MISC_CONTROL);
+ if (misc_ctl & MC_TIMESTAMP_ENABLE)
+ SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl &
+ (~MC_TIMESTAMP_ENABLE));
+
+ for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+ if (!(handle->hal_handle->ae_mask & (1 << ae)))
+ continue;
+ qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0);
+ qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0);
+ }
+ /* start timestamp timers */
+ SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl | MC_TIMESTAMP_ENABLE);
+}
+
+#define ESRAM_AUTO_TINIT (1<<2)
+#define ESRAM_AUTO_TINIT_DONE (1<<3)
+#define ESRAM_AUTO_INIT_USED_CYCLES (1640)
+#define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C
+static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
+{
+ void __iomem *csr_addr = handle->hal_ep_csr_addr_v +
+ ESRAM_AUTO_INIT_CSR_OFFSET;
+ unsigned int csr_val, times = 30;
+
+ csr_val = ADF_CSR_RD(csr_addr, 0);
+ if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE))
+ return 0;
+
+ csr_val = ADF_CSR_RD(csr_addr, 0);
+ csr_val |= ESRAM_AUTO_TINIT;
+ ADF_CSR_WR(csr_addr, 0, csr_val);
+
+ do {
+ qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0);
+ csr_val = ADF_CSR_RD(csr_addr, 0);
+ } while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--);
+ if (!times) {
+ pr_err("QAT: Failed to init eSRAM\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+#define SHRAM_INIT_CYCLES 2060
+int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle)
+{
+ unsigned int ae_reset_csr;
+ unsigned char ae;
+ unsigned int clk_csr;
+ unsigned int times = 100;
+ unsigned int csr;
+
+ /* write to the reset csr */
+ ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET);
+ ae_reset_csr &= ~(handle->hal_handle->ae_mask << RST_CSR_AE_LSB);
+ ae_reset_csr &= ~(handle->hal_handle->slice_mask << RST_CSR_QAT_LSB);
+ do {
+ SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr);
+ if (!(times--))
+ goto out_err;
+ csr = GET_GLB_CSR(handle, ICP_RESET);
+ } while ((handle->hal_handle->ae_mask |
+ (handle->hal_handle->slice_mask << RST_CSR_QAT_LSB)) & csr);
+ /* enable clock */
+ clk_csr = GET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE);
+ clk_csr |= handle->hal_handle->ae_mask << 0;
+ clk_csr |= handle->hal_handle->slice_mask << 20;
+ SET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE, clk_csr);
+ if (qat_hal_check_ae_alive(handle))
+ goto out_err;
+
+ /* Set undefined power-up/reset states to reasonable default values */
+ for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+ if (!(handle->hal_handle->ae_mask & (1 << ae)))
+ continue;
+ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
+ INIT_CTX_ENABLE_VALUE);
+ qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX,
+ CTX_STS_INDIRECT,
+ handle->hal_handle->upc_mask &
+ INIT_PC_VALUE);
+ qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
+ qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
+ qat_hal_put_wakeup_event(handle, ae,
+ ICP_QAT_UCLO_AE_ALL_CTX,
+ INIT_WAKEUP_EVENTS_VALUE);
+ qat_hal_put_sig_event(handle, ae,
+ ICP_QAT_UCLO_AE_ALL_CTX,
+ INIT_SIG_EVENTS_VALUE);
+ }
+ if (qat_hal_init_esram(handle))
+ goto out_err;
+ if (qat_hal_wait_cycles(handle, 0, SHRAM_INIT_CYCLES, 0))
+ goto out_err;
+ qat_hal_reset_timestamp(handle);
+
+ return 0;
+out_err:
+ pr_err("QAT: failed to get device out of reset\n");
+ return -EFAULT;
+}
+
+static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int ctx_mask)
+{
+ unsigned int ctx;
+
+ qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
+ ctx &= IGNORE_W1C_MASK &
+ (~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS));
+ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
+}
+
+static uint64_t qat_hal_parity_64bit(uint64_t word)
+{
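+ /* XOR-fold the word so that bit 0 ends up holding the parity of all 64 bits */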
+ word ^= word >> 1;
+ word ^= word >> 2;
+ word ^= word >> 4;
+ word ^= word >> 8;
+ word ^= word >> 16;
+ word ^= word >> 32;
+ return word & 1;
+}
+
+static uint64_t qat_hal_set_uword_ecc(uint64_t uword)
+{
+ uint64_t bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL,
+ bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL,
+ bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL,
+ bit6_mask = 0xdaf69a46910ULL;
+
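+ /* Each mask selects the data bits whose parity forms one of the seven ECC check bits */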
+ /* clear the ecc bits */
+ uword &= ~(0x7fULL << 0x2C);
+ uword |= qat_hal_parity_64bit(bit0_mask & uword) << 0x2C;
+ uword |= qat_hal_parity_64bit(bit1_mask & uword) << 0x2D;
+ uword |= qat_hal_parity_64bit(bit2_mask & uword) << 0x2E;
+ uword |= qat_hal_parity_64bit(bit3_mask & uword) << 0x2F;
+ uword |= qat_hal_parity_64bit(bit4_mask & uword) << 0x30;
+ uword |= qat_hal_parity_64bit(bit5_mask & uword) << 0x31;
+ uword |= qat_hal_parity_64bit(bit6_mask & uword) << 0x32;
+ return uword;
+}
+
+void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int uaddr,
+ unsigned int words_num, uint64_t *uword)
+{
+ unsigned int ustore_addr;
+ unsigned int i;
+
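+ /* Save the ustore address, write each word with its ECC bits via the data CSRs, then restore the address */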
+ qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+ uaddr |= UA_ECS;
+ qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+ for (i = 0; i < words_num; i++) {
+ unsigned int uwrd_lo, uwrd_hi;
+ uint64_t tmp;
+
+ tmp = qat_hal_set_uword_ecc(uword[i]);
+ uwrd_lo = (unsigned int)(tmp & 0xffffffff);
+ uwrd_hi = (unsigned int)(tmp >> 0x20);
+ qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
+ qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
+ }
+ qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+}
+
+static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int ctx_mask)
+{
+ unsigned int ctx;
+
+ qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
+ ctx &= IGNORE_W1C_MASK;
+ ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF;
+ ctx |= (ctx_mask << CE_ENABLE_BITPOS);
+ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
+}
+
+static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
+{
+ unsigned char ae;
+ unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX;
+ int times = MAX_RETRY_TIMES;
+ unsigned int csr_val = 0;
+ unsigned short reg;
+ unsigned int savctx = 0;
+ int ret = 0;
+
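+ /* Run a short microprogram on every enabled AE to clear the GPRs, then wait for it to finish and restore the reset-time context state */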
+ for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+ if (!(handle->hal_handle->ae_mask & (1 << ae)))
+ continue;
+ for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) {
+ qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS,
+ reg, 0);
+ qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS,
+ reg, 0);
+ }
+ qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
+ csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
+ qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
+ qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val);
+ csr_val &= IGNORE_W1C_MASK;
+ csr_val |= CE_NN_MODE;
+ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
+ qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst),
+ (uint64_t *)inst);
+ qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
+ handle->hal_handle->upc_mask &
+ INIT_PC_VALUE);
+ qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
+ qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0);
+ qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY);
+ qat_hal_wr_indr_csr(handle, ae, ctx_mask,
+ CTX_SIG_EVENTS_INDIRECT, 0);
+ qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
+ qat_hal_enable_ctx(handle, ae, ctx_mask);
+ }
+ for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+ if (!(handle->hal_handle->ae_mask & (1 << ae)))
+ continue;
+ /* wait for AE to finish */
+ do {
+ ret = qat_hal_wait_cycles(handle, ae, 20, 1);
+ } while (ret && times--);
+
+ if (!times) {
+ pr_err("QAT: clear GPR of AE %d failed", ae);
+ return -EINVAL;
+ }
+ qat_hal_disable_ctx(handle, ae, ctx_mask);
+ qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
+ savctx & ACS_ACNO);
+ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
+ INIT_CTX_ENABLE_VALUE);
+ qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
+ handle->hal_handle->upc_mask &
+ INIT_PC_VALUE);
+ qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
+ qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
+ qat_hal_put_wakeup_event(handle, ae, ctx_mask,
+ INIT_WAKEUP_EVENTS_VALUE);
+ qat_hal_put_sig_event(handle, ae, ctx_mask,
+ INIT_SIG_EVENTS_VALUE);
+ }
+ return 0;
+}
+
+#define ICP_DH895XCC_AE_OFFSET 0x20000
+#define ICP_DH895XCC_CAP_OFFSET (ICP_DH895XCC_AE_OFFSET + 0x10000)
+#define LOCAL_TO_XFER_REG_OFFSET 0x800
+#define ICP_DH895XCC_EP_OFFSET 0x3a000
+#define ICP_DH895XCC_PMISC_BAR 1
+int qat_hal_init(struct adf_accel_dev *accel_dev)
+{
+ unsigned char ae;
+ unsigned int max_en_ae_id = 0;
+ struct icp_qat_fw_loader_handle *handle;
+ struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_bar *bar = &pci_info->pci_bars[ADF_DH895XCC_PMISC_BAR];
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ handle->hal_cap_g_ctl_csr_addr_v = bar->virt_addr +
+ ICP_DH895XCC_CAP_OFFSET;
+ handle->hal_cap_ae_xfer_csr_addr_v = bar->virt_addr +
+ ICP_DH895XCC_AE_OFFSET;
+ handle->hal_ep_csr_addr_v = bar->virt_addr + ICP_DH895XCC_EP_OFFSET;
+ handle->hal_cap_ae_local_csr_addr_v =
+ handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET;
+
+ handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL);
+ if (!handle->hal_handle)
+ goto out_hal_handle;
+ handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid;
+ handle->hal_handle->ae_mask = hw_data->ae_mask;
+ handle->hal_handle->slice_mask = hw_data->accel_mask;
+ /* create AE objects */
+ handle->hal_handle->upc_mask = 0x1ffff;
+ handle->hal_handle->max_ustore = 0x4000;
+ for (ae = 0; ae < ICP_QAT_UCLO_MAX_AE; ae++) {
+ if (!(hw_data->ae_mask & (1 << ae)))
+ continue;
+ handle->hal_handle->aes[ae].free_addr = 0;
+ handle->hal_handle->aes[ae].free_size =
+ handle->hal_handle->max_ustore;
+ handle->hal_handle->aes[ae].ustore_size =
+ handle->hal_handle->max_ustore;
+ handle->hal_handle->aes[ae].live_ctx_mask =
+ ICP_QAT_UCLO_AE_ALL_CTX;
+ max_en_ae_id = ae;
+ }
+ handle->hal_handle->ae_max_num = max_en_ae_id + 1;
+ /* take all AEs out of reset */
+ if (qat_hal_clr_reset(handle)) {
+ pr_err("QAT: qat_hal_clr_reset error\n");
+ goto out_err;
+ }
+ if (qat_hal_clear_gpr(handle))
+ goto out_err;
+ /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
+ for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+ unsigned int csr_val = 0;
+
+ if (!(hw_data->ae_mask & (1 << ae)))
+ continue;
+ qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val);
+ csr_val |= 0x1;
+ qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
+ }
+ accel_dev->fw_loader->fw_loader = handle;
+ return 0;
+
+out_err:
+ kfree(handle->hal_handle);
+out_hal_handle:
+ kfree(handle);
+ return -EFAULT;
+}
+
+void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle)
+{
+ if (!handle)
+ return;
+ kfree(handle->hal_handle);
+ kfree(handle);
+}
+
+void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+ unsigned int ctx_mask)
+{
+ qat_hal_put_wakeup_event(handle, ae, (~ctx_mask) &
+ ICP_QAT_UCLO_AE_ALL_CTX, 0x10000);
+ qat_hal_enable_ctx(handle, ae, ctx_mask);
+}
+
+void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+ unsigned int ctx_mask)
+{
+ qat_hal_disable_ctx(handle, ae, ctx_mask);
+}
+
+void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int ctx_mask, unsigned int upc)
+{
+ qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
+ handle->hal_handle->upc_mask & upc);
+}
+
+static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int uaddr,
+ unsigned int words_num, uint64_t *uword)
+{
+ unsigned int i, uwrd_lo, uwrd_hi;
+ unsigned int ustore_addr, misc_control;
+
+ qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &misc_control);
+ qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL,
+ misc_control & 0xfffffffb);
+ qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+ uaddr |= UA_ECS;
+ for (i = 0; i < words_num; i++) {
+ qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+ uaddr++;
+ qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER, &uwrd_lo);
+ qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER, &uwrd_hi);
+ uword[i] = uwrd_hi;
+ uword[i] = (uword[i] << 0x20) | uwrd_lo;
+ }
+ qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control);
+ qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+}
+
+void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int uaddr,
+ unsigned int words_num, unsigned int *data)
+{
+ unsigned int i, ustore_addr;
+
+ qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+ uaddr |= UA_ECS;
+ qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+ for (i = 0; i < words_num; i++) {
+ unsigned int uwrd_lo, uwrd_hi, tmp;
+
+ uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) |
+ ((data[i] & 0xff00) << 2) |
+ (0x3 << 8) | (data[i] & 0xff);
+ uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28);
+ uwrd_hi |= (hweight32(data[i] & 0xffff) & 0x1) << 8;
+ tmp = ((data[i] >> 0x10) & 0xffff);
+ uwrd_hi |= (hweight32(tmp) & 0x1) << 9;
+ qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
+ qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
+ }
+ qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+}
+
+#define MAX_EXEC_INST 100
+static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char ctx,
+ uint64_t *micro_inst, unsigned int inst_num,
+ int code_off, unsigned int max_cycle,
+ unsigned int *endpc)
+{
+ uint64_t savuwords[MAX_EXEC_INST];
+ unsigned int ind_lm_addr0, ind_lm_addr1;
+ unsigned int ind_lm_addr_byte0, ind_lm_addr_byte1;
+ unsigned int ind_cnt_sig;
+ unsigned int ind_sig, act_sig;
+ unsigned int csr_val = 0, newcsr_val;
+ unsigned int savctx;
+ unsigned int savcc, wakeup_events, savpc;
+ unsigned int ctxarb_ctl, ctx_enables;
+
+ if ((inst_num > handle->hal_handle->max_ustore) || !micro_inst) {
+ pr_err("QAT: invalid instruction num %d\n", inst_num);
+ return -EINVAL;
+ }
+ /* save current context */
+ qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT, &ind_lm_addr0);
+ qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT, &ind_lm_addr1);
+ qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX,
+ &ind_lm_addr_byte0);
+ qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX,
+ &ind_lm_addr_byte1);
+ if (inst_num <= MAX_EXEC_INST)
+ qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
+ qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
+ qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, &savpc);
+ savpc = (savpc & handle->hal_handle->upc_mask) >> 0;
+ qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ ctx_enables &= IGNORE_W1C_MASK;
+ qat_hal_rd_ae_csr(handle, ae, CC_ENABLE, &savcc);
+ qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
+ qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_ctl);
+ qat_hal_rd_indr_csr(handle, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT,
+ &ind_cnt_sig);
+ qat_hal_rd_indr_csr(handle, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &ind_sig);
+ qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, &act_sig);
+ /* execute micro codes */
+ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+ qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
+ qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, 0);
+ qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO);
+ if (code_off)
+ qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc & 0xffffdfff);
+ qat_hal_put_wakeup_event(handle, ae, (1 << ctx), XCWE_VOLUNTARY);
+ qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, 0);
+ qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
+ qat_hal_enable_ctx(handle, ae, (1 << ctx));
+ /* wait for micro codes to finish */
+ if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0)
+ return -EFAULT;
+ if (endpc) {
+ unsigned int ctx_status;
+
+ qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT,
+ &ctx_status);
+ *endpc = ctx_status & handle->hal_handle->upc_mask;
+ }
+ /* restore the saved context */
+ qat_hal_disable_ctx(handle, ae, (1 << ctx));
+ if (inst_num <= MAX_EXEC_INST)
+ qat_hal_wr_uwords(handle, ae, 0, inst_num, savuwords);
+ qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
+ qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT,
+ handle->hal_handle->upc_mask & savpc);
+ qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
+ newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
+ qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
+ qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc);
+ qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO);
+ qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_ctl);
+ qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+ LM_ADDR_0_INDIRECT, ind_lm_addr0);
+ qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+ LM_ADDR_1_INDIRECT, ind_lm_addr1);
+ qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+ INDIRECT_LM_ADDR_0_BYTE_INDEX, ind_lm_addr_byte0);
+ qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+ INDIRECT_LM_ADDR_1_BYTE_INDEX, ind_lm_addr_byte1);
+ qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+ FUTURE_COUNT_SIGNAL_INDIRECT, ind_cnt_sig);
+ qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+ CTX_SIG_EVENTS_INDIRECT, ind_sig);
+ qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig);
+ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+
+ return 0;
+}
+
+static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char ctx,
+ enum icp_qat_uof_regtype reg_type,
+ unsigned short reg_num, unsigned int *data)
+{
+ unsigned int savctx, uaddr, uwrd_lo, uwrd_hi;
+ unsigned int ctxarb_cntl, ustore_addr, ctx_enables;
+ unsigned short reg_addr;
+ int status = 0;
+ uint64_t insts, savuword;
+
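+ /* Read the register by patching a single instruction into the ustore, letting it execute and sampling ALU_OUT, then restoring the saved microword */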
+ reg_addr = qat_hal_get_reg_addr(reg_type, reg_num);
+ if (reg_addr == BAD_REGADDR) {
+ pr_err("QAT: bad regaddr=0x%x\n", reg_addr);
+ return -EINVAL;
+ }
+ switch (reg_type) {
+ case ICP_GPA_REL:
+ insts = 0xA070000000ull | (reg_addr & 0x3ff);
+ break;
+ default:
+ insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
+ break;
+ }
+ qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
+ qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_cntl);
+ qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ ctx_enables &= IGNORE_W1C_MASK;
+ if (ctx != (savctx & ACS_ACNO))
+ qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
+ ctx & ACS_ACNO);
+ qat_hal_get_uwords(handle, ae, 0, 1, &savuword);
+ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+ qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+ uaddr = UA_ECS;
+ qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+ insts = qat_hal_set_uword_ecc(insts);
+ uwrd_lo = (unsigned int)(insts & 0xffffffff);
+ uwrd_hi = (unsigned int)(insts >> 0x20);
+ qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
+ qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
+ qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+ /* delay for at least 8 cycles */
+ qat_hal_wait_cycles(handle, ae, 0x8, 0);
+ /*
+ * read ALU output
+ * the instruction should have been executed
+ * prior to clearing the ECS in putUwords
+ */
+ qat_hal_rd_ae_csr(handle, ae, ALU_OUT, data);
+ qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+ qat_hal_wr_uwords(handle, ae, 0, 1, &savuword);
+ if (ctx != (savctx & ACS_ACNO))
+ qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
+ savctx & ACS_ACNO);
+ qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_cntl);
+ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+
+ return status;
+}
+
+static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char ctx,
+ enum icp_qat_uof_regtype reg_type,
+ unsigned short reg_num, unsigned int data)
+{
+ unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo;
+ uint64_t insts[] = {
+ 0x0F440000000ull,
+ 0x0F040000000ull,
+ 0x0F0000C0300ull,
+ 0x0E000010000ull
+ };
+ const int num_inst = ARRAY_SIZE(insts), code_off = 1;
+ const int imm_w1 = 0, imm_w0 = 1;
+
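+ /* Patch the two immediate-load instructions with the halves of the value, then execute them to load the target register */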
+ dest_addr = qat_hal_get_reg_addr(reg_type, reg_num);
+ if (dest_addr == BAD_REGADDR) {
+ pr_err("QAT: bad destAddr=0x%x\n", dest_addr);
+ return -EINVAL;
+ }
+
+ data16lo = 0xffff & data;
+ data16hi = 0xffff & (data >> 0x10);
+ src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
+ (0xff & data16hi));
+ src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
+ (0xff & data16lo));
+ switch (reg_type) {
+ case ICP_GPA_REL:
+ insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
+ ((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
+ insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
+ ((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
+ break;
+ default:
+ insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
+ ((dest_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
+
+ insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
+ ((dest_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
+ break;
+ }
+
+ return qat_hal_exec_micro_inst(handle, ae, ctx, insts, num_inst,
+ code_off, num_inst * 0x5, NULL);
+}
+
+int qat_hal_get_ins_num(void)
+{
+ return ARRAY_SIZE(inst_4b);
+}
+
+static int qat_hal_concat_micro_code(uint64_t *micro_inst,
+ unsigned int inst_num, unsigned int size,
+ unsigned int addr, unsigned int *value)
+{
+ int i, val_indx;
+ unsigned int cur_value;
+ const uint64_t *inst_arr;
+ int fixup_offset;
+ int usize = 0;
+ int orig_num;
+
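+ /* Append the inst_4b template and patch its immediates with the target address and the two halves of the value */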
+ orig_num = inst_num;
+ val_indx = 0;
+ cur_value = value[val_indx++];
+ inst_arr = inst_4b;
+ usize = ARRAY_SIZE(inst_4b);
+ fixup_offset = inst_num;
+ for (i = 0; i < usize; i++)
+ micro_inst[inst_num++] = inst_arr[i];
+ INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], (addr));
+ fixup_offset++;
+ INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], 0);
+ fixup_offset++;
+ INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0));
+ fixup_offset++;
+ INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0x10));
+
+ return inst_num - orig_num;
+}
+
+static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char ctx,
+ int *pfirst_exec, uint64_t *micro_inst,
+ unsigned int inst_num)
+{
+ int stat = 0;
+ unsigned int gpra0 = 0, gpra1 = 0, gpra2 = 0;
+ unsigned int gprb0 = 0, gprb1 = 0;
+
+ if (*pfirst_exec) {
+ qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, &gpra0);
+ qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, &gpra1);
+ qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, &gpra2);
+ qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, &gprb0);
+ qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, &gprb1);
+ *pfirst_exec = 0;
+ }
+ stat = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, inst_num, 1,
+ inst_num * 0x5, NULL);
+ if (stat != 0)
+ return -EFAULT;
+ qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, gpra0);
+ qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, gpra1);
+ qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, gpra2);
+ qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, gprb0);
+ qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, gprb1);
+
+ return 0;
+}
+
+int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae,
+ struct icp_qat_uof_batch_init *lm_init_header)
+{
+ struct icp_qat_uof_batch_init *plm_init;
+ uint64_t *micro_inst_arry;
+ int micro_inst_num;
+ int alloc_inst_size;
+ int first_exec = 1;
+ int stat = 0;
+
+ plm_init = lm_init_header->next;
+ alloc_inst_size = lm_init_header->size;
+ if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore)
+ alloc_inst_size = handle->hal_handle->max_ustore;
+ micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(uint64_t),
+ GFP_KERNEL);
+ if (!micro_inst_arry)
+ return -ENOMEM;
+ micro_inst_num = 0;
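+ /* Concatenate one write sequence per batch entry, then execute the whole program once */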
+ while (plm_init) {
+ unsigned int addr, *value, size;
+
+ ae = plm_init->ae;
+ addr = plm_init->addr;
+ value = plm_init->value;
+ size = plm_init->size;
+ micro_inst_num += qat_hal_concat_micro_code(micro_inst_arry,
+ micro_inst_num,
+ size, addr, value);
+ plm_init = plm_init->next;
+ }
+ /* exec micro codes */
+ if (micro_inst_arry && (micro_inst_num > 0)) {
+ micro_inst_arry[micro_inst_num++] = 0x0E000010000ull;
+ stat = qat_hal_exec_micro_init_lm(handle, ae, 0, &first_exec,
+ micro_inst_arry,
+ micro_inst_num);
+ }
+ kfree(micro_inst_arry);
+ return stat;
+}
+
+static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char ctx,
+ enum icp_qat_uof_regtype reg_type,
+ unsigned short reg_num, unsigned int val)
+{
+ int status = 0;
+ unsigned int reg_addr;
+ unsigned int ctx_enables;
+ unsigned short mask;
+ unsigned short dr_offset = 0x10;
+
+ status = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ if (CE_INUSE_CONTEXTS & ctx_enables) {
+ if (ctx & 0x1) {
+ pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx);
+ return -EINVAL;
+ }
+ mask = 0x1f;
+ dr_offset = 0x20;
+ } else {
+ mask = 0x0f;
+ }
+ if (reg_num & ~mask)
+ return -EINVAL;
+ reg_addr = reg_num + (ctx << 0x5);
+ switch (reg_type) {
+ case ICP_SR_RD_REL:
+ case ICP_SR_REL:
+ SET_AE_XFER(handle, ae, reg_addr, val);
+ break;
+ case ICP_DR_RD_REL:
+ case ICP_DR_REL:
+ SET_AE_XFER(handle, ae, (reg_addr + dr_offset), val);
+ break;
+ default:
+ status = -EINVAL;
+ break;
+ }
+ return status;
+}
+
+static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char ctx,
+ enum icp_qat_uof_regtype reg_type,
+ unsigned short reg_num, unsigned int data)
+{
+ unsigned int gprval, ctx_enables;
+ unsigned short src_hiaddr, src_lowaddr, gpr_addr, xfr_addr, data16hi,
+ data16low;
+ unsigned short reg_mask;
+ int status = 0;
+ uint64_t micro_inst[] = {
+ 0x0F440000000ull,
+ 0x0F040000000ull,
+ 0x0A000000000ull,
+ 0x0F0000C0300ull,
+ 0x0E000010000ull
+ };
+ const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1;
+ const unsigned short gprnum = 0, dly = num_inst * 0x5;
+
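+ /* Stage the value in GPR B0 with two immediate loads, move it to the transfer register, then restore the GPR */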
+ qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ if (CE_INUSE_CONTEXTS & ctx_enables) {
+ if (ctx & 0x1) {
+ pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx);
+ return -EINVAL;
+ }
+ reg_mask = (unsigned short)~0x1f;
+ } else {
+ reg_mask = (unsigned short)~0xf;
+ }
+ if (reg_num & reg_mask)
+ return -EINVAL;
+ xfr_addr = qat_hal_get_reg_addr(reg_type, reg_num);
+ if (xfr_addr == BAD_REGADDR) {
+ pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr);
+ return -EINVAL;
+ }
+ qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
+ gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum);
+ data16low = 0xffff & data;
+ data16hi = 0xffff & (data >> 0x10);
+ src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
+ (unsigned short)(0xff & data16hi));
+ src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
+ (unsigned short)(0xff & data16low));
+ micro_inst[0] = micro_inst[0x0] | ((data16hi >> 8) << 20) |
+ ((gpr_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
+ micro_inst[1] = micro_inst[0x1] | ((data16low >> 8) << 20) |
+ ((gpr_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
+ micro_inst[0x2] = micro_inst[0x2] |
+ ((xfr_addr & 0x3ff) << 20) | ((gpr_addr & 0x3ff) << 10);
+ status = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, num_inst,
+ code_off, dly, NULL);
+ qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, gprval);
+ return status;
+}
+
+static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char ctx,
+ unsigned short nn, unsigned int val)
+{
+ unsigned int ctx_enables;
+ int stat = 0;
+
+ qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ ctx_enables &= IGNORE_W1C_MASK;
+ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);
+
+ stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, ICP_NEIGH_REL, nn, val);
+ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+ return stat;
+}
+
+static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle
+ *handle, unsigned char ae,
+ unsigned short absreg_num,
+ unsigned short *relreg,
+ unsigned char *ctx)
+{
+ unsigned int ctx_enables;
+
+ qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ if (ctx_enables & CE_INUSE_CONTEXTS) {
+ /* 4-ctx mode */
+ *relreg = absreg_num & 0x1F;
+ *ctx = (absreg_num >> 0x4) & 0x6;
+ } else {
+ /* 8-ctx mode */
+ *relreg = absreg_num & 0x0F;
+ *ctx = (absreg_num >> 0x4) & 0x7;
+ }
+ return 0;
+}
+
+int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char ctx_mask,
+ enum icp_qat_uof_regtype reg_type,
+ unsigned short reg_num, unsigned int regdata)
+{
+ int stat = 0;
+ unsigned short reg;
+ unsigned char ctx = 0;
+ enum icp_qat_uof_regtype type;
+
+ if (reg_num >= ICP_QAT_UCLO_MAX_GPR_REG)
+ return -EINVAL;
+
+ do {
+ if (ctx_mask == 0) {
+ qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
+ &ctx);
+ type = reg_type - 1;
+ } else {
+ reg = reg_num;
+ type = reg_type;
+ if (!test_bit(ctx, (unsigned long *)&ctx_mask))
+ continue;
+ }
+ stat = qat_hal_wr_rel_reg(handle, ae, ctx, type, reg, regdata);
+ if (stat) {
+ pr_err("QAT: write gpr fail\n");
+ return -EINVAL;
+ }
+ } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
+
+ return 0;
+}
+
+int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char ctx_mask,
+ enum icp_qat_uof_regtype reg_type,
+ unsigned short reg_num, unsigned int regdata)
+{
+ int stat = 0;
+ unsigned short reg;
+ unsigned char ctx = 0;
+ enum icp_qat_uof_regtype type;
+
+ if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
+ return -EINVAL;
+
+ do {
+ if (ctx_mask == 0) {
+ qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
+ &ctx);
+ type = reg_type - 3;
+ } else {
+ reg = reg_num;
+ type = reg_type;
+ if (!test_bit(ctx, (unsigned long *)&ctx_mask))
+ continue;
+ }
+ stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, type, reg,
+ regdata);
+ if (stat) {
+ pr_err("QAT: write wr xfer fail\n");
+ return -EINVAL;
+ }
+ } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
+
+ return 0;
+}
+
+int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char ctx_mask,
+ enum icp_qat_uof_regtype reg_type,
+ unsigned short reg_num, unsigned int regdata)
+{
+ int stat = 0;
+ unsigned short reg;
+ unsigned char ctx = 0;
+ enum icp_qat_uof_regtype type;
+
+ if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
+ return -EINVAL;
+
+ do {
+ if (ctx_mask == 0) {
+ qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
+ &ctx);
+ type = reg_type - 3;
+ } else {
+ reg = reg_num;
+ type = reg_type;
+ if (!test_bit(ctx, (unsigned long *)&ctx_mask))
+ continue;
+ }
+ stat = qat_hal_put_rel_rd_xfer(handle, ae, ctx, type, reg,
+ regdata);
+ if (stat) {
+ pr_err("QAT: write rd xfer fail\n");
+ return -EINVAL;
+ }
+ } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
+
+ return 0;
+}
+
+int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char ctx_mask,
+ unsigned short reg_num, unsigned int regdata)
+{
+ int stat = 0;
+ unsigned char ctx;
+
+ if (ctx_mask == 0)
+ return -EINVAL;
+
+ for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+ if (!test_bit(ctx, (unsigned long *)&ctx_mask))
+ continue;
+ stat = qat_hal_put_rel_nn(handle, ae, ctx, reg_num, regdata);
+ if (stat) {
+ pr_err("QAT: write neigh error\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
new file mode 100644
index 000000000000..1e27f9f7fddf
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -0,0 +1,1181 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_uclo.h"
+#include "icp_qat_hal.h"
+#include "icp_qat_fw_loader_handle.h"
+
+#define UWORD_CPYBUF_SIZE 1024
+#define INVLD_UWORD 0xffffffffffull
+#define PID_MINOR_REV 0xf
+#define PID_MAJOR_REV (0xf << 4)
+
+static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
+ unsigned int ae, unsigned int image_num)
+{
+ struct icp_qat_uclo_aedata *ae_data;
+ struct icp_qat_uclo_encapme *encap_image;
+ struct icp_qat_uclo_page *page = NULL;
+ struct icp_qat_uclo_aeslice *ae_slice = NULL;
+
+ ae_data = &obj_handle->ae_data[ae];
+ encap_image = &obj_handle->ae_uimage[image_num];
+ ae_slice = &ae_data->ae_slices[ae_data->slice_num];
+ ae_slice->encap_image = encap_image;
+
+ if (encap_image->img_ptr) {
+ ae_slice->ctx_mask_assigned =
+ encap_image->img_ptr->ctx_assigned;
+ ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
+ } else {
+ ae_slice->ctx_mask_assigned = 0;
+ }
+ ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
+ if (!ae_slice->region)
+ return -ENOMEM;
+ ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
+ if (!ae_slice->page)
+ goto out_err;
+ page = ae_slice->page;
+ page->encap_page = encap_image->page;
+ ae_slice->page->region = ae_slice->region;
+ ae_data->slice_num++;
+ return 0;
+out_err:
+ kfree(ae_slice->region);
+ ae_slice->region = NULL;
+ return -ENOMEM;
+}
+
+static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
+{
+ unsigned int i;
+
+ if (!ae_data) {
+ pr_err("QAT: bad argument, ae_data is NULL\n ");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ae_data->slice_num; i++) {
+ kfree(ae_data->ae_slices[i].region);
+ ae_data->ae_slices[i].region = NULL;
+ kfree(ae_data->ae_slices[i].page);
+ ae_data->ae_slices[i].page = NULL;
+ }
+ return 0;
+}
+
+static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
+ unsigned int str_offset)
+{
+ if ((!str_table->table_len) || (str_offset > str_table->table_len))
+ return NULL;
+ return (char *)(((unsigned long)(str_table->strings)) + str_offset);
+}
+
+static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr)
+{
+ int maj = hdr->maj_ver & 0xff;
+ int min = hdr->min_ver & 0xff;
+
+ if (hdr->file_id != ICP_QAT_UOF_FID) {
+ pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
+ return -EINVAL;
+ }
+ if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
+ pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
+ maj, min);
+ return -EINVAL;
+ }
+ return 0;
+}
+
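+/* write val to SRAM one 32-bit word at a time; num_in_bytes is expected to be a multiple of 4 */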
+static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
+ unsigned int addr, unsigned int *val,
+ unsigned int num_in_bytes)
+{
+ unsigned int outval;
+ unsigned char *ptr = (unsigned char *)val;
+
+ while (num_in_bytes) {
+ memcpy(&outval, ptr, 4);
+ SRAM_WRITE(handle, addr, outval);
+ num_in_bytes -= 4;
+ ptr += 4;
+ addr += 4;
+ }
+}
+
+static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int addr,
+ unsigned int *val,
+ unsigned int num_in_bytes)
+{
+ unsigned int outval;
+ unsigned char *ptr = (unsigned char *)val;
+
+ addr >>= 0x2; /* convert to uword address */
+
+ while (num_in_bytes) {
+ memcpy(&outval, ptr, 4);
+ qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
+ num_in_bytes -= 4;
+ ptr += 4;
+ }
+}
+
+static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae,
+ struct icp_qat_uof_batch_init
+ *umem_init_header)
+{
+ struct icp_qat_uof_batch_init *umem_init;
+
+ if (!umem_init_header)
+ return;
+ umem_init = umem_init_header->next;
+ while (umem_init) {
+ unsigned int addr, *value, size;
+
+ ae = umem_init->ae;
+ addr = umem_init->addr;
+ value = umem_init->value;
+ size = umem_init->size;
+ qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
+ umem_init = umem_init->next;
+ }
+}
+
+static void
+qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
+ struct icp_qat_uof_batch_init **base)
+{
+ struct icp_qat_uof_batch_init *umem_init;
+
+ umem_init = *base;
+ while (umem_init) {
+ struct icp_qat_uof_batch_init *pre;
+
+ pre = umem_init;
+ umem_init = umem_init->next;
+ kfree(pre);
+ }
+ *base = NULL;
+}
+
+static int qat_uclo_parse_num(char *str, unsigned int *num)
+{
+ char buf[16] = {0};
+ unsigned long ae = 0;
+ int i;
+
+ strncpy(buf, str, 15);
+ for (i = 0; i < 16; i++) {
+ if (!isdigit(buf[i])) {
+ buf[i] = '\0';
+ break;
+ }
+ }
+	if (kstrtoul(buf, 10, &ae))
+ return -EFAULT;
+
+ *num = (unsigned int)ae;
+ return 0;
+}
+
+static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
+ struct icp_qat_uof_initmem *init_mem,
+ unsigned int size_range, unsigned int *ae)
+{
+ struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+ char *str;
+
+ if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
+ pr_err("QAT: initmem is out of range");
+ return -EINVAL;
+ }
+ if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
+ pr_err("QAT: Memory scope for init_mem error\n");
+ return -EINVAL;
+ }
+ str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
+ if (!str) {
+ pr_err("QAT: AE name assigned in UOF init table is NULL\n");
+ return -EINVAL;
+ }
+ if (qat_uclo_parse_num(str, ae)) {
+ pr_err("QAT: Parse num for AE number failed\n");
+ return -EINVAL;
+ }
+ if (*ae >= ICP_QAT_UCLO_MAX_AE) {
+ pr_err("QAT: ae %d out of range\n", *ae);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
+ *handle, struct icp_qat_uof_initmem
+ *init_mem, unsigned int ae,
+ struct icp_qat_uof_batch_init
+ **init_tab_base)
+{
+ struct icp_qat_uof_batch_init *init_header, *tail;
+ struct icp_qat_uof_batch_init *mem_init, *tail_old;
+ struct icp_qat_uof_memvar_attr *mem_val_attr;
+ unsigned int i, flag = 0;
+
+ mem_val_attr =
+ (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem +
+ sizeof(struct icp_qat_uof_initmem));
+
+ init_header = *init_tab_base;
+ if (!init_header) {
+ init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
+ if (!init_header)
+ return -ENOMEM;
+ init_header->size = 1;
+ *init_tab_base = init_header;
+ flag = 1;
+ }
+ tail_old = init_header;
+ while (tail_old->next)
+ tail_old = tail_old->next;
+ tail = tail_old;
+ for (i = 0; i < init_mem->val_attr_num; i++) {
+ mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
+ if (!mem_init)
+ goto out_err;
+ mem_init->ae = ae;
+ mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
+ mem_init->value = &mem_val_attr->value;
+ mem_init->size = 4;
+ mem_init->next = NULL;
+ tail->next = mem_init;
+ tail = mem_init;
+ init_header->size += qat_hal_get_ins_num();
+ mem_val_attr++;
+ }
+ return 0;
+out_err:
+ while (tail_old) {
+ mem_init = tail_old->next;
+ kfree(tail_old);
+ tail_old = mem_init;
+ }
+ if (flag)
+ kfree(*init_tab_base);
+ return -ENOMEM;
+}
+
+static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
+ struct icp_qat_uof_initmem *init_mem)
+{
+ struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+ unsigned int ae;
+
+ if (qat_uclo_fetch_initmem_ae(handle, init_mem,
+ ICP_QAT_UCLO_MAX_LMEM_REG, &ae))
+ return -EINVAL;
+ if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
+ &obj_handle->lm_init_tab[ae]))
+ return -EINVAL;
+ return 0;
+}
+
+static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
+ struct icp_qat_uof_initmem *init_mem)
+{
+ struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+ unsigned int ae, ustore_size, uaddr, i;
+
+ ustore_size = obj_handle->ustore_phy_size;
+ if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
+ return -EINVAL;
+ if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
+ &obj_handle->umem_init_tab[ae]))
+ return -EINVAL;
+ /* set the highest ustore address referenced */
+ uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
+ for (i = 0; i < obj_handle->ae_data[ae].slice_num; i++) {
+ if (obj_handle->ae_data[ae].ae_slices[i].
+ encap_image->uwords_num < uaddr)
+ obj_handle->ae_data[ae].ae_slices[i].
+ encap_image->uwords_num = uaddr;
+ }
+ return 0;
+}
+
+#define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000
+static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
+ struct icp_qat_uof_initmem *init_mem)
+{
+ unsigned int i;
+ struct icp_qat_uof_memvar_attr *mem_val_attr;
+
+ mem_val_attr =
+ (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem +
+ sizeof(struct icp_qat_uof_initmem));
+
+ switch (init_mem->region) {
+ case ICP_QAT_UOF_SRAM_REGION:
+ if ((init_mem->addr + init_mem->num_in_bytes) >
+ ICP_DH895XCC_PESRAM_BAR_SIZE) {
+ pr_err("QAT: initmem on SRAM is out of range");
+ return -EINVAL;
+ }
+ for (i = 0; i < init_mem->val_attr_num; i++) {
+ qat_uclo_wr_sram_by_words(handle,
+ init_mem->addr +
+ mem_val_attr->offset_in_byte,
+ &mem_val_attr->value, 4);
+ mem_val_attr++;
+ }
+ break;
+ case ICP_QAT_UOF_LMEM_REGION:
+ if (qat_uclo_init_lmem_seg(handle, init_mem))
+ return -EINVAL;
+ break;
+ case ICP_QAT_UOF_UMEM_REGION:
+ if (qat_uclo_init_umem_seg(handle, init_mem))
+ return -EINVAL;
+ break;
+ default:
+ pr_err("QAT: initmem region error. region type=0x%x\n",
+ init_mem->region);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
+ struct icp_qat_uclo_encapme *image)
+{
+ unsigned int i;
+ struct icp_qat_uclo_encap_page *page;
+ struct icp_qat_uof_image *uof_image;
+ unsigned char ae;
+ unsigned int ustore_size;
+ unsigned int patt_pos;
+ struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+ uint64_t *fill_data;
+
+ uof_image = image->img_ptr;
+ fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t),
+ GFP_KERNEL);
+ if (!fill_data)
+ return -ENOMEM;
+ for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
+ memcpy(&fill_data[i], &uof_image->fill_pattern,
+ sizeof(uint64_t));
+ page = image->page;
+
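+	/* pad the ustore outside the image page (before its start and after its end) with the image's fill pattern */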
+ for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+ if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
+ continue;
+ ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
+ patt_pos = page->beg_addr_p + page->micro_words_num;
+
+ qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
+ page->beg_addr_p, &fill_data[0]);
+ qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
+ ustore_size - patt_pos + 1,
+ &fill_data[page->beg_addr_p]);
+ }
+ kfree(fill_data);
+ return 0;
+}
+
+static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
+{
+ int i, ae;
+ struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+ struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
+
+ for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
+ if (initmem->num_in_bytes) {
+ if (qat_uclo_init_ae_memory(handle, initmem))
+ return -EINVAL;
+ }
+ initmem = (struct icp_qat_uof_initmem *)((unsigned long)(
+ (unsigned long)initmem +
+ sizeof(struct icp_qat_uof_initmem)) +
+ (sizeof(struct icp_qat_uof_memvar_attr) *
+ initmem->val_attr_num));
+ }
+ for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+ if (qat_hal_batch_wr_lm(handle, ae,
+ obj_handle->lm_init_tab[ae])) {
+ pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
+ return -EINVAL;
+ }
+ qat_uclo_cleanup_batch_init_list(handle,
+ &obj_handle->lm_init_tab[ae]);
+ qat_uclo_batch_wr_umem(handle, ae,
+ obj_handle->umem_init_tab[ae]);
+ qat_uclo_cleanup_batch_init_list(handle,
+ &obj_handle->
+ umem_init_tab[ae]);
+ }
+ return 0;
+}
+
+static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
+ char *chunk_id, void *cur)
+{
+ int i;
+ struct icp_qat_uof_chunkhdr *chunk_hdr =
+ (struct icp_qat_uof_chunkhdr *)
+ ((unsigned long)obj_hdr + sizeof(struct icp_qat_uof_objhdr));
+
+ for (i = 0; i < obj_hdr->num_chunks; i++) {
+ if ((cur < (void *)&chunk_hdr[i]) &&
+ !strncmp(chunk_hdr[i].chunk_id, chunk_id,
+ ICP_QAT_UOF_OBJID_LEN)) {
+ return &chunk_hdr[i];
+ }
+ }
+ return NULL;
+}
+
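+/* CRC-16 (CCITT polynomial 0x1021) update: fold one input byte into the running checksum */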
+static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
+{
+ int i;
+ unsigned int topbit = 1 << 0xF;
+ unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);
+
+ reg ^= inbyte << 0x8;
+ for (i = 0; i < 0x8; i++) {
+ if (reg & topbit)
+ reg = (reg << 1) ^ 0x1021;
+ else
+ reg <<= 1;
+ }
+ return reg & 0xFFFF;
+}
+
+static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
+{
+ unsigned int chksum = 0;
+
+ if (ptr)
+ while (num--)
+ chksum = qat_uclo_calc_checksum(chksum, *ptr++);
+ return chksum;
+}
+
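+/* find the file chunk with the given ID, verify its checksum and wrap it in a newly allocated object header */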
+static struct icp_qat_uclo_objhdr *
+qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
+ char *chunk_id)
+{
+ struct icp_qat_uof_filechunkhdr *file_chunk;
+ struct icp_qat_uclo_objhdr *obj_hdr;
+ char *chunk;
+ int i;
+
+ file_chunk = (struct icp_qat_uof_filechunkhdr *)
+ (buf + sizeof(struct icp_qat_uof_filehdr));
+ for (i = 0; i < file_hdr->num_chunks; i++) {
+ if (!strncmp(file_chunk->chunk_id, chunk_id,
+ ICP_QAT_UOF_OBJID_LEN)) {
+ chunk = buf + file_chunk->offset;
+ if (file_chunk->checksum != qat_uclo_calc_str_checksum(
+ chunk, file_chunk->size))
+ break;
+ obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
+ if (!obj_hdr)
+ break;
+ obj_hdr->file_buff = chunk;
+ obj_hdr->checksum = file_chunk->checksum;
+ obj_hdr->size = file_chunk->size;
+ return obj_hdr;
+ }
+ file_chunk++;
+ }
+ return NULL;
+}
+
+static unsigned int
+qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
+ struct icp_qat_uof_image *image)
+{
+ struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
+ struct icp_qat_uof_objtable *neigh_reg_tab;
+ struct icp_qat_uof_code_page *code_page;
+
+ code_page = (struct icp_qat_uof_code_page *)
+ ((char *)image + sizeof(struct icp_qat_uof_image));
+ uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
+ code_page->uc_var_tab_offset);
+ imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
+ code_page->imp_var_tab_offset);
+ imp_expr_tab = (struct icp_qat_uof_objtable *)
+ (encap_uof_obj->beg_uof +
+ code_page->imp_expr_tab_offset);
+ if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
+ imp_expr_tab->entry_num) {
+ pr_err("QAT: UOF can't contain imported variable to be parsed");
+ return -EINVAL;
+ }
+ neigh_reg_tab = (struct icp_qat_uof_objtable *)
+ (encap_uof_obj->beg_uof +
+ code_page->neigh_reg_tab_offset);
+ if (neigh_reg_tab->entry_num) {
+ pr_err("QAT: UOF can't contain shared control store feature");
+ return -EINVAL;
+ }
+ if (image->numpages > 1) {
+ pr_err("QAT: UOF can't contain multiple pages");
+ return -EINVAL;
+ }
+ if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
+ pr_err("QAT: UOF can't use shared control store feature");
+ return -EFAULT;
+ }
+ if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
+ pr_err("QAT: UOF can't use reloadable feature");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
+ *encap_uof_obj,
+ struct icp_qat_uof_image *img,
+ struct icp_qat_uclo_encap_page *page)
+{
+ struct icp_qat_uof_code_page *code_page;
+ struct icp_qat_uof_code_area *code_area;
+ struct icp_qat_uof_objtable *uword_block_tab;
+ struct icp_qat_uof_uword_block *uwblock;
+ int i;
+
+ code_page = (struct icp_qat_uof_code_page *)
+ ((char *)img + sizeof(struct icp_qat_uof_image));
+ page->def_page = code_page->def_page;
+ page->page_region = code_page->page_region;
+ page->beg_addr_v = code_page->beg_addr_v;
+ page->beg_addr_p = code_page->beg_addr_p;
+ code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
+ code_page->code_area_offset);
+ page->micro_words_num = code_area->micro_words_num;
+ uword_block_tab = (struct icp_qat_uof_objtable *)
+ (encap_uof_obj->beg_uof +
+ code_area->uword_block_tab);
+ page->uwblock_num = uword_block_tab->entry_num;
+ uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
+ sizeof(struct icp_qat_uof_objtable));
+ page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
+ for (i = 0; i < uword_block_tab->entry_num; i++)
+ page->uwblock[i].micro_words =
+ (unsigned long)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
+}
+
+static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
+ struct icp_qat_uclo_encapme *ae_uimage,
+ int max_image)
+{
+ int i, j;
+ struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
+ struct icp_qat_uof_image *image;
+ struct icp_qat_uof_objtable *ae_regtab;
+ struct icp_qat_uof_objtable *init_reg_sym_tab;
+ struct icp_qat_uof_objtable *sbreak_tab;
+ struct icp_qat_uof_encap_obj *encap_uof_obj =
+ &obj_handle->encap_uof_obj;
+
+ for (j = 0; j < max_image; j++) {
+ chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
+ ICP_QAT_UOF_IMAG, chunk_hdr);
+ if (!chunk_hdr)
+ break;
+ image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
+ chunk_hdr->offset);
+ ae_regtab = (struct icp_qat_uof_objtable *)
+ (image->reg_tab_offset +
+ obj_handle->obj_hdr->file_buff);
+ ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
+ ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
+ (((char *)ae_regtab) +
+ sizeof(struct icp_qat_uof_objtable));
+ init_reg_sym_tab = (struct icp_qat_uof_objtable *)
+ (image->init_reg_sym_tab +
+ obj_handle->obj_hdr->file_buff);
+ ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
+ ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
+ (((char *)init_reg_sym_tab) +
+ sizeof(struct icp_qat_uof_objtable));
+ sbreak_tab = (struct icp_qat_uof_objtable *)
+ (image->sbreak_tab + obj_handle->obj_hdr->file_buff);
+ ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
+ ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
+ (((char *)sbreak_tab) +
+ sizeof(struct icp_qat_uof_objtable));
+ ae_uimage[j].img_ptr = image;
+ if (qat_uclo_check_image_compat(encap_uof_obj, image))
+ goto out_err;
+ ae_uimage[j].page =
+ kzalloc(sizeof(struct icp_qat_uclo_encap_page),
+ GFP_KERNEL);
+ if (!ae_uimage[j].page)
+ goto out_err;
+ qat_uclo_map_image_page(encap_uof_obj, image,
+ ae_uimage[j].page);
+ }
+ return j;
+out_err:
+ for (i = 0; i < j; i++)
+ kfree(ae_uimage[i].page);
+ return 0;
+}
+
+static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
+{
+ int i, ae;
+ int mflag = 0;
+ struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+
+ for (ae = 0; ae <= max_ae; ae++) {
+ if (!test_bit(ae,
+ (unsigned long *)&handle->hal_handle->ae_mask))
+ continue;
+ for (i = 0; i < obj_handle->uimage_num; i++) {
+ if (!test_bit(ae, (unsigned long *)
+ &obj_handle->ae_uimage[i].img_ptr->ae_assigned))
+ continue;
+ mflag = 1;
+ if (qat_uclo_init_ae_data(obj_handle, ae, i))
+ return -EINVAL;
+ }
+ }
+ if (!mflag) {
+ pr_err("QAT: uimage uses AE not set");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static struct icp_qat_uof_strtable *
+qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
+ char *tab_name, struct icp_qat_uof_strtable *str_table)
+{
+ struct icp_qat_uof_chunkhdr *chunk_hdr;
+
+ chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
+ obj_hdr->file_buff, tab_name, NULL);
+ if (chunk_hdr) {
+ int hdr_size;
+
+ memcpy(&str_table->table_len, obj_hdr->file_buff +
+ chunk_hdr->offset, sizeof(str_table->table_len));
+ hdr_size = (char *)&str_table->strings - (char *)str_table;
+ str_table->strings = (unsigned long)obj_hdr->file_buff +
+ chunk_hdr->offset + hdr_size;
+ return str_table;
+ }
+ return NULL;
+}
+
+static void
+qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
+ struct icp_qat_uclo_init_mem_table *init_mem_tab)
+{
+ struct icp_qat_uof_chunkhdr *chunk_hdr;
+
+ chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
+ ICP_QAT_UOF_IMEM, NULL);
+ if (chunk_hdr) {
+ memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
+ chunk_hdr->offset, sizeof(unsigned int));
+ init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
+ (encap_uof_obj->beg_uof + chunk_hdr->offset +
+ sizeof(unsigned int));
+ }
+}
+
+static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
+{
+ unsigned int maj_ver, prod_type = obj_handle->prod_type;
+
+ if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->cpu_type)) {
+ pr_err("QAT: UOF type 0x%x not match with cur platform 0x%x\n",
+ obj_handle->encap_uof_obj.obj_hdr->cpu_type, prod_type);
+ return -EINVAL;
+ }
+ maj_ver = obj_handle->prod_rev & 0xff;
+ if ((obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver) ||
+ (obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver)) {
+ pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char ctx_mask,
+ enum icp_qat_uof_regtype reg_type,
+ unsigned short reg_addr, unsigned int value)
+{
+ switch (reg_type) {
+ case ICP_GPA_ABS:
+ case ICP_GPB_ABS:
+ ctx_mask = 0;
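+		/* fall through */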
+ case ICP_GPA_REL:
+ case ICP_GPB_REL:
+ return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
+ reg_addr, value);
+ case ICP_SR_ABS:
+ case ICP_DR_ABS:
+ case ICP_SR_RD_ABS:
+ case ICP_DR_RD_ABS:
+ ctx_mask = 0;
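+		/* fall through */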
+ case ICP_SR_REL:
+ case ICP_DR_REL:
+ case ICP_SR_RD_REL:
+ case ICP_DR_RD_REL:
+ return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
+ reg_addr, value);
+ case ICP_SR_WR_ABS:
+ case ICP_DR_WR_ABS:
+ ctx_mask = 0;
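+		/* fall through */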
+ case ICP_SR_WR_REL:
+ case ICP_DR_WR_REL:
+ return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
+ reg_addr, value);
+ case ICP_NEIGH_REL:
+ return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
+ default:
+ pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
+ unsigned int ae,
+ struct icp_qat_uclo_encapme *encap_ae)
+{
+ unsigned int i;
+ unsigned char ctx_mask;
+ struct icp_qat_uof_init_regsym *init_regsym;
+
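+	/* use all contexts in 8-ctx mode, only the even contexts (mask 0x55) otherwise */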
+ if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
+ ICP_QAT_UCLO_MAX_CTX)
+ ctx_mask = 0xff;
+ else
+ ctx_mask = 0x55;
+
+ for (i = 0; i < encap_ae->init_regsym_num; i++) {
+ unsigned int exp_res;
+
+ init_regsym = &encap_ae->init_regsym[i];
+ exp_res = init_regsym->value;
+ switch (init_regsym->init_type) {
+ case ICP_QAT_UOF_INIT_REG:
+ qat_uclo_init_reg(handle, ae, ctx_mask,
+ (enum icp_qat_uof_regtype)
+ init_regsym->reg_type,
+ (unsigned short)init_regsym->reg_addr,
+ exp_res);
+ break;
+ case ICP_QAT_UOF_INIT_REG_CTX:
+ /* check if ctx is appropriate for the ctxMode */
+ if (!((1 << init_regsym->ctx) & ctx_mask)) {
+ pr_err("QAT: invalid ctx num = 0x%x\n",
+ init_regsym->ctx);
+ return -EINVAL;
+ }
+ qat_uclo_init_reg(handle, ae,
+ (unsigned char)
+ (1 << init_regsym->ctx),
+ (enum icp_qat_uof_regtype)
+ init_regsym->reg_type,
+ (unsigned short)init_regsym->reg_addr,
+ exp_res);
+ break;
+ case ICP_QAT_UOF_INIT_EXPR:
+ pr_err("QAT: INIT_EXPR feature not supported\n");
+ return -EINVAL;
+ case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
+ pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
+ return -EINVAL;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
+{
+ struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+ unsigned int s, ae;
+
+ if (obj_handle->global_inited)
+ return 0;
+ if (obj_handle->init_mem_tab.entry_num) {
+ if (qat_uclo_init_memory(handle)) {
+ pr_err("QAT: initialize memory failed\n");
+ return -EINVAL;
+ }
+ }
+ for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+ for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
+ if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
+ continue;
+ if (qat_uclo_init_reg_sym(handle, ae,
+ obj_handle->ae_data[ae].
+ ae_slices[s].encap_image))
+ return -EINVAL;
+ }
+ }
+ obj_handle->global_inited = 1;
+ return 0;
+}
+
+static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
+{
+ unsigned char ae, nn_mode, s;
+ struct icp_qat_uof_image *uof_image;
+ struct icp_qat_uclo_aedata *ae_data;
+ struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+
+ for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+ if (!test_bit(ae,
+ (unsigned long *)&handle->hal_handle->ae_mask))
+ continue;
+ ae_data = &obj_handle->ae_data[ae];
+ for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
+ ICP_QAT_UCLO_MAX_CTX); s++) {
+ if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
+ continue;
+ uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
+ if (qat_hal_set_ae_ctx_mode(handle, ae,
+ (char)ICP_QAT_CTX_MODE
+ (uof_image->ae_mode))) {
+ pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
+ return -EFAULT;
+ }
+ nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
+ if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) {
+ pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
+ return -EFAULT;
+ }
+ if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0,
+ (char)ICP_QAT_LOC_MEM0_MODE
+ (uof_image->ae_mode))) {
+ pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
+ return -EFAULT;
+ }
+ if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1,
+ (char)ICP_QAT_LOC_MEM1_MODE
+ (uof_image->ae_mode))) {
+ pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
+ return -EFAULT;
+ }
+ }
+ }
+ return 0;
+}
+
+static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
+{
+ struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+ struct icp_qat_uclo_encapme *image;
+ int a;
+
+ for (a = 0; a < obj_handle->uimage_num; a++) {
+ image = &obj_handle->ae_uimage[a];
+ image->uwords_num = image->page->beg_addr_p +
+ image->page->micro_words_num;
+ }
+}
+
+static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
+{
+ struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+ unsigned int ae;
+
+ obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t),
+ GFP_KERNEL);
+ if (!obj_handle->uword_buf)
+ return -ENOMEM;
+ obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
+ obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
+ obj_handle->obj_hdr->file_buff;
+ obj_handle->uword_in_bytes = 6;
+ obj_handle->prod_type = ICP_QAT_AC_C_CPU_TYPE;
+ obj_handle->prod_rev = PID_MAJOR_REV |
+ (PID_MINOR_REV & handle->hal_handle->revision_id);
+ if (qat_uclo_check_uof_compat(obj_handle)) {
+ pr_err("QAT: UOF incompatible\n");
+ return -EINVAL;
+ }
+ obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
+ if (!obj_handle->obj_hdr->file_buff ||
+ !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
+ &obj_handle->str_table)) {
+ pr_err("QAT: UOF doesn't have effective images\n");
+ goto out_err;
+ }
+ obj_handle->uimage_num =
+ qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
+ ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
+ if (!obj_handle->uimage_num)
+ goto out_err;
+ if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
+ pr_err("QAT: Bad object\n");
+ goto out_check_uof_aemask_err;
+ }
+ qat_uclo_init_uword_num(handle);
+ qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
+ &obj_handle->init_mem_tab);
+ if (qat_uclo_set_ae_mode(handle))
+ goto out_check_uof_aemask_err;
+ return 0;
+out_check_uof_aemask_err:
+ for (ae = 0; ae < obj_handle->uimage_num; ae++)
+ kfree(obj_handle->ae_uimage[ae].page);
+out_err:
+ kfree(obj_handle->uword_buf);
+ return -EFAULT;
+}
+
+int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
+ void *addr_ptr, int mem_size)
+{
+ struct icp_qat_uof_filehdr *filehdr;
+ struct icp_qat_uclo_objhandle *objhdl;
+
+ BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
+ (sizeof(handle->hal_handle->ae_mask) * 8));
+
+ if (!handle || !addr_ptr || mem_size < 24)
+ return -EINVAL;
+ objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
+ if (!objhdl)
+ return -ENOMEM;
+ objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
+ if (!objhdl->obj_buf)
+ goto out_objbuf_err;
+ filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
+ if (qat_uclo_check_format(filehdr))
+ goto out_objhdr_err;
+ objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
+ ICP_QAT_UOF_OBJS);
+ if (!objhdl->obj_hdr) {
+ pr_err("QAT: object file chunk is null\n");
+ goto out_objhdr_err;
+ }
+ handle->obj_handle = objhdl;
+ if (qat_uclo_parse_uof_obj(handle))
+ goto out_overlay_obj_err;
+ return 0;
+
+out_overlay_obj_err:
+ handle->obj_handle = NULL;
+ kfree(objhdl->obj_hdr);
+out_objhdr_err:
+ kfree(objhdl->obj_buf);
+out_objbuf_err:
+ kfree(objhdl);
+ return -ENOMEM;
+}
+
+void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
+{
+ struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+ unsigned int a;
+
+ if (!obj_handle)
+ return;
+
+ kfree(obj_handle->uword_buf);
+ for (a = 0; a < obj_handle->uimage_num; a++)
+ kfree(obj_handle->ae_uimage[a].page);
+
+ for (a = 0; a < handle->hal_handle->ae_max_num; a++)
+ qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
+
+ kfree(obj_handle->obj_hdr);
+ kfree(obj_handle->obj_buf);
+ kfree(obj_handle);
+ handle->obj_handle = NULL;
+}
+
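+/* fetch the microword at raddr from the page's uword blocks; the fill pattern is used when there is no page or the stored uword is the invalid marker */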
+static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
+ struct icp_qat_uclo_encap_page *encap_page,
+ uint64_t *uword, unsigned int addr_p,
+ unsigned int raddr, uint64_t fill)
+{
+ uint64_t uwrd = 0;
+ unsigned int i;
+
+ if (!encap_page) {
+ *uword = fill;
+ return;
+ }
+ for (i = 0; i < encap_page->uwblock_num; i++) {
+ if (raddr >= encap_page->uwblock[i].start_addr &&
+ raddr <= encap_page->uwblock[i].start_addr +
+ encap_page->uwblock[i].words_num - 1) {
+ raddr -= encap_page->uwblock[i].start_addr;
+ raddr *= obj_handle->uword_in_bytes;
+ memcpy(&uwrd, (void *)(((unsigned long)
+ encap_page->uwblock[i].micro_words) + raddr),
+ obj_handle->uword_in_bytes);
+ uwrd = uwrd & 0xbffffffffffull;
+ }
+ }
+ *uword = uwrd;
+ if (*uword == INVLD_UWORD)
+ *uword = fill;
+}
+
+static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
+ struct icp_qat_uclo_encap_page
+ *encap_page, unsigned int ae)
+{
+ unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
+ struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+ uint64_t fill_pat;
+
+ /* load the page starting at appropriate ustore address */
+ /* get fill-pattern from an image -- they are all the same */
+ memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
+ sizeof(uint64_t));
+ uw_physical_addr = encap_page->beg_addr_p;
+ uw_relative_addr = 0;
+ words_num = encap_page->micro_words_num;
+ while (words_num) {
+ if (words_num < UWORD_CPYBUF_SIZE)
+ cpylen = words_num;
+ else
+ cpylen = UWORD_CPYBUF_SIZE;
+
+ /* load the buffer */
+ for (i = 0; i < cpylen; i++)
+ qat_uclo_fill_uwords(obj_handle, encap_page,
+ &obj_handle->uword_buf[i],
+ uw_physical_addr + i,
+ uw_relative_addr + i, fill_pat);
+
+ /* copy the buffer to ustore */
+ qat_hal_wr_uwords(handle, (unsigned char)ae,
+ uw_physical_addr, cpylen,
+ obj_handle->uword_buf);
+
+ uw_physical_addr += cpylen;
+ uw_relative_addr += cpylen;
+ words_num -= cpylen;
+ }
+}
+
+static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
+ struct icp_qat_uof_image *image)
+{
+ struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+ unsigned int ctx_mask, s;
+ struct icp_qat_uclo_page *page;
+ unsigned char ae;
+ int ctx;
+
+ if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
+ ctx_mask = 0xff;
+ else
+ ctx_mask = 0x55;
+ /* load the default page and set assigned CTX PC
+ * to the entrypoint address */
+ for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+ if (!test_bit(ae, (unsigned long *)&image->ae_assigned))
+ continue;
+ /* find the slice to which this image is assigned */
+ for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
+ if (image->ctx_assigned & obj_handle->ae_data[ae].
+ ae_slices[s].ctx_mask_assigned)
+ break;
+ }
+ if (s >= obj_handle->ae_data[ae].slice_num)
+ continue;
+ page = obj_handle->ae_data[ae].ae_slices[s].page;
+ if (!page->encap_page->def_page)
+ continue;
+ qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);
+
+ page = obj_handle->ae_data[ae].ae_slices[s].page;
+ for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
+ obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] =
+ (ctx_mask & (1 << ctx)) ? page : NULL;
+ qat_hal_set_live_ctx(handle, (unsigned char)ae,
+ image->ctx_assigned);
+ qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
+ image->entry_address);
+ }
+}
+
+int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
+{
+ struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+ unsigned int i;
+
+ if (qat_uclo_init_globals(handle))
+ return -EINVAL;
+ for (i = 0; i < obj_handle->uimage_num; i++) {
+ if (!obj_handle->ae_uimage[i].img_ptr)
+ return -EINVAL;
+ if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
+ return -EINVAL;
+ qat_uclo_wr_uimage_page(handle,
+ obj_handle->ae_uimage[i].img_ptr);
+ }
+ return 0;
+}
diff --git a/drivers/crypto/qat/qat_dh895xcc/Makefile b/drivers/crypto/qat/qat_dh895xcc/Makefile
new file mode 100644
index 000000000000..25171c557043
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xcc/Makefile
@@ -0,0 +1,8 @@
+ccflags-y := -I$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
+qat_dh895xcc-objs := adf_drv.o \
+ adf_isr.o \
+ adf_dh895xcc_hw_data.o \
+ adf_hw_arbiter.o \
+ qat_admin.o \
+ adf_admin.o
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
new file mode 100644
index 000000000000..978d6c56639d
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
@@ -0,0 +1,144 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <adf_accel_devices.h>
+#include "adf_drv.h"
+#include "adf_dh895xcc_hw_data.h"
+
+#define ADF_ADMINMSG_LEN 32
+
+struct adf_admin_comms {
+ dma_addr_t phy_addr;
+ void *virt_addr;
+ void __iomem *mailbox_addr;
+ struct mutex lock; /* protects adf_admin_comms struct */
+};
+
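+/*
+ * Send an admin message to the given AE: copy the request into the shared
+ * DMA buffer, ring the AE's mailbox and poll (50 x 20ms) until the firmware
+ * clears it, then copy the response back from the buffer.
+ */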
+int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev,
+ uint32_t ae, void *in, void *out)
+{
+ struct adf_admin_comms *admin = accel_dev->admin;
+ int offset = ae * ADF_ADMINMSG_LEN * 2;
+ void __iomem *mailbox = admin->mailbox_addr;
+ int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE;
+ int times, received;
+
+ mutex_lock(&admin->lock);
+
+ if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
+ mutex_unlock(&admin->lock);
+ return -EAGAIN;
+ }
+
+ memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
+ ADF_CSR_WR(mailbox, mb_offset, 1);
+ received = 0;
+ for (times = 0; times < 50; times++) {
+ msleep(20);
+ if (ADF_CSR_RD(mailbox, mb_offset) == 0) {
+ received = 1;
+ break;
+ }
+ }
+ if (received)
+ memcpy(out, admin->virt_addr + offset +
+ ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
+ else
+ pr_err("QAT: Failed to send admin msg to accelerator\n");
+
+ mutex_unlock(&admin->lock);
+ return received ? 0 : -EFAULT;
+}
+
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
+{
+ struct adf_admin_comms *admin;
+ struct adf_bar *pmisc = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
+ void __iomem *csr = pmisc->virt_addr;
+ void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET;
+ uint64_t reg_val;
+
+ admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
+ accel_dev->numa_node);
+ if (!admin)
+ return -ENOMEM;
+ admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+ &admin->phy_addr, GFP_KERNEL);
+ if (!admin->virt_addr) {
+ dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
+ kfree(admin);
+ return -ENOMEM;
+ }
+ reg_val = (uint64_t)admin->phy_addr;
+ ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32);
+ ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val);
+ mutex_init(&admin->lock);
+ admin->mailbox_addr = mailbox;
+ accel_dev->admin = admin;
+ return 0;
+}
+
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
+{
+ struct adf_admin_comms *admin = accel_dev->admin;
+
+ if (!admin)
+ return;
+
+ if (admin->virt_addr)
+ dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+ admin->virt_addr, admin->phy_addr);
+
+ mutex_destroy(&admin->lock);
+ kfree(admin);
+ accel_dev->admin = NULL;
+}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
new file mode 100644
index 000000000000..ef05825cc651
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -0,0 +1,214 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include "adf_dh895xcc_hw_data.h"
+#include "adf_drv.h"
+
+/* Worker-thread to service-arbiter mappings, selected by device SKU */
+static const uint32_t thrd_to_arb_map_sku4[] = {
+ 0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
+ 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000
+};
+
+static const uint32_t thrd_to_arb_map_sku6[] = {
+ 0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
+ 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
+ 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222
+};
+
+static struct adf_hw_device_class dh895xcc_class = {
+ .name = ADF_DH895XCC_DEVICE_NAME,
+ .type = DEV_DH895XCC,
+ .instances = 0
+};
+
+static uint32_t get_accel_mask(uint32_t fuse)
+{
+ return (~fuse) >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET &
+ ADF_DH895XCC_ACCELERATORS_MASK;
+}
+
+static uint32_t get_ae_mask(uint32_t fuse)
+{
+ return (~fuse) & ADF_DH895XCC_ACCELENGINES_MASK;
+}
+
+static uint32_t get_num_accels(struct adf_hw_device_data *self)
+{
+ uint32_t i, ctr = 0;
+
+ if (!self || !self->accel_mask)
+ return 0;
+
+ for (i = 0; i < ADF_DH895XCC_MAX_ACCELERATORS; i++) {
+ if (self->accel_mask & (1 << i))
+ ctr++;
+ }
+ return ctr;
+}
+
+static uint32_t get_num_aes(struct adf_hw_device_data *self)
+{
+ uint32_t i, ctr = 0;
+
+ if (!self || !self->ae_mask)
+ return 0;
+
+ for (i = 0; i < ADF_DH895XCC_MAX_ACCELENGINES; i++) {
+ if (self->ae_mask & (1 << i))
+ ctr++;
+ }
+ return ctr;
+}
+
+static uint32_t get_misc_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_DH895XCC_PMISC_BAR;
+}
+
+static uint32_t get_etr_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_DH895XCC_ETR_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+ int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK)
+ >> ADF_DH895XCC_FUSECTL_SKU_SHIFT;
+
+ switch (sku) {
+ case ADF_DH895XCC_FUSECTL_SKU_1:
+ return DEV_SKU_1;
+ case ADF_DH895XCC_FUSECTL_SKU_2:
+ return DEV_SKU_2;
+ case ADF_DH895XCC_FUSECTL_SKU_3:
+ return DEV_SKU_3;
+ case ADF_DH895XCC_FUSECTL_SKU_4:
+ return DEV_SKU_4;
+ default:
+ return DEV_SKU_UNKNOWN;
+ }
+ return DEV_SKU_UNKNOWN;
+}
+
+void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
+ uint32_t const **arb_map_config)
+{
+ switch (accel_dev->accel_pci_dev.sku) {
+ case DEV_SKU_1:
+ *arb_map_config = thrd_to_arb_map_sku4;
+ break;
+
+ case DEV_SKU_2:
+ case DEV_SKU_4:
+ *arb_map_config = thrd_to_arb_map_sku6;
+ break;
+ default:
+ pr_err("QAT: The configuration doesn't match any SKU");
+ *arb_map_config = NULL;
+ }
+}
+
+static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
+ void __iomem *csr = misc_bar->virt_addr;
+ unsigned int val, i;
+
+ /* Enable Accel Engine error detection & correction */
+ for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
+ val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_CTX_ENABLES(i));
+ val |= ADF_DH895XCC_ENABLE_AE_ECC_ERR;
+ ADF_CSR_WR(csr, ADF_DH895XCC_AE_CTX_ENABLES(i), val);
+ val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_MISC_CONTROL(i));
+ val |= ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR;
+ ADF_CSR_WR(csr, ADF_DH895XCC_AE_MISC_CONTROL(i), val);
+ }
+
+ /* Enable shared memory error detection & correction */
+ for (i = 0; i < hw_device->get_num_accels(hw_device); i++) {
+ val = ADF_CSR_RD(csr, ADF_DH895XCC_UERRSSMSH(i));
+ val |= ADF_DH895XCC_ERRSSMSH_EN;
+ ADF_CSR_WR(csr, ADF_DH895XCC_UERRSSMSH(i), val);
+ val = ADF_CSR_RD(csr, ADF_DH895XCC_CERRSSMSH(i));
+ val |= ADF_DH895XCC_ERRSSMSH_EN;
+ ADF_CSR_WR(csr, ADF_DH895XCC_CERRSSMSH(i), val);
+ }
+}
+
+void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
+{
+ hw_data->dev_class = &dh895xcc_class;
+ hw_data->instance_id = dh895xcc_class.instances++;
+ hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS;
+ hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
+ hw_data->pci_dev_id = ADF_DH895XCC_PCI_DEVICE_ID;
+ hw_data->num_logical_accel = 1;
+ hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
+ hw_data->tx_rx_gap = ADF_DH895XCC_RX_RINGS_OFFSET;
+ hw_data->tx_rings_mask = ADF_DH895XCC_TX_RINGS_MASK;
+ hw_data->alloc_irq = adf_isr_resource_alloc;
+ hw_data->free_irq = adf_isr_resource_free;
+ hw_data->enable_error_correction = adf_enable_error_correction;
+ hw_data->hw_arb_ring_enable = adf_update_ring_arb_enable;
+ hw_data->hw_arb_ring_disable = adf_update_ring_arb_enable;
+ hw_data->get_accel_mask = get_accel_mask;
+ hw_data->get_ae_mask = get_ae_mask;
+ hw_data->get_num_accels = get_num_accels;
+ hw_data->get_num_aes = get_num_aes;
+ hw_data->get_etr_bar_id = get_etr_bar_id;
+ hw_data->get_misc_bar_id = get_misc_bar_id;
+ hw_data->get_sku = get_sku;
+ hw_data->fw_name = ADF_DH895XCC_FW;
+}
+
+void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
+{
+ hw_data->dev_class->instances--;
+}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
new file mode 100644
index 000000000000..b707f292b377
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
@@ -0,0 +1,86 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_DH895x_HW_DATA_H_
+#define ADF_DH895x_HW_DATA_H_
+
+/* PCIe configuration space */
+#define ADF_DH895XCC_RX_RINGS_OFFSET 8
+#define ADF_DH895XCC_TX_RINGS_MASK 0xFF
+#define ADF_DH895XCC_FUSECTL_OFFSET 0x40
+#define ADF_DH895XCC_FUSECTL_SKU_MASK 0x300000
+#define ADF_DH895XCC_FUSECTL_SKU_SHIFT 20
+#define ADF_DH895XCC_FUSECTL_SKU_1 0x0
+#define ADF_DH895XCC_FUSECTL_SKU_2 0x1
+#define ADF_DH895XCC_FUSECTL_SKU_3 0x2
+#define ADF_DH895XCC_FUSECTL_SKU_4 0x3
+#define ADF_DH895XCC_MAX_ACCELERATORS 6
+#define ADF_DH895XCC_MAX_ACCELENGINES 12
+#define ADF_DH895XCC_ACCELERATORS_REG_OFFSET 13
+#define ADF_DH895XCC_ACCELERATORS_MASK 0x3F
+#define ADF_DH895XCC_ACCELENGINES_MASK 0xFFF
+#define ADF_DH895XCC_LEGFUSE_OFFSET 0x4C
+#define ADF_DH895XCC_ETR_MAX_BANKS 32
+#define ADF_DH895XCC_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
+#define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
+#define ADF_DH895XCC_SMIA0_MASK 0xFFFF
+#define ADF_DH895XCC_SMIA1_MASK 0x1
+/* Error detection and correction */
+#define ADF_DH895XCC_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
+#define ADF_DH895XCC_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960)
+#define ADF_DH895XCC_ENABLE_AE_ECC_ERR (1 << 28)
+#define ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR (1 << 24 | 1 << 12)
+#define ADF_DH895XCC_UERRSSMSH(i) (i * 0x4000 + 0x18)
+#define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10)
+#define ADF_DH895XCC_ERRSSMSH_EN (1 << 3)
+
+/* Admin Messages Registers */
+#define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574)
+#define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578)
+#define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970
+#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000
+#define ADF_DH895XCC_FW "qat_895xcc.bin"
+#endif
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
new file mode 100644
index 000000000000..0d0435a41be9
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -0,0 +1,449 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include <adf_transport_access_macros.h>
+#include "adf_dh895xcc_hw_data.h"
+#include "adf_drv.h"
+
+static const char adf_driver_name[] = ADF_DH895XCC_DEVICE_NAME;
+
+#define ADF_SYSTEM_DEVICE(device_id) \
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ ADF_SYSTEM_DEVICE(ADF_DH895XCC_PCI_DEVICE_ID),
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = adf_driver_name,
+ .probe = adf_probe,
+ .remove = adf_remove
+};
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+ int i;
+
+ adf_exit_admin_comms(accel_dev);
+ adf_exit_arb(accel_dev);
+ adf_cleanup_etr_data(accel_dev);
+
+ for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+ if (bar->virt_addr)
+ pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+ }
+
+ if (accel_dev->hw_device) {
+ switch (accel_dev->hw_device->pci_dev_id) {
+ case ADF_DH895XCC_PCI_DEVICE_ID:
+ adf_clean_hw_data_dh895xcc(accel_dev->hw_device);
+ break;
+ default:
+ break;
+ }
+ kfree(accel_dev->hw_device);
+ }
+ adf_cfg_dev_remove(accel_dev);
+ debugfs_remove(accel_dev->debugfs_dir);
+ adf_devmgr_rm_dev(accel_dev);
+ pci_release_regions(accel_pci_dev->pci_dev);
+ pci_disable_device(accel_pci_dev->pci_dev);
+ kfree(accel_dev);
+}
+
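+/* rough NUMA node estimate for the device, assuming PCI bus numbers are spread evenly across physical packages */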
+static uint8_t adf_get_dev_node_id(struct pci_dev *pdev)
+{
+ unsigned int bus_per_cpu = 0;
+ struct cpuinfo_x86 *c = &cpu_data(num_online_cpus() - 1);
+
+ if (!c->phys_proc_id)
+ return 0;
+
+ bus_per_cpu = 256 / (c->phys_proc_id + 1);
+
+ if (bus_per_cpu != 0)
+ return pdev->bus->number / bus_per_cpu;
+ return 0;
+}
+
+static int qat_dev_start(struct adf_accel_dev *accel_dev)
+{
+ int cpus = num_online_cpus();
+ int banks = GET_MAX_BANKS(accel_dev);
+ int instances = min(cpus, banks);
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ int i;
+ unsigned long val;
+
+ if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
+ goto err;
+ if (adf_cfg_section_add(accel_dev, "Accelerator0"))
+ goto err;
+ for (i = 0; i < instances; i++) {
+ val = i;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
+ i);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+ val = 128;
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 512;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 0;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 2;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 4;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 8;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 10;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 12;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = ADF_COALESCING_DEF_TIME;
+ snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
+ if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+ key, (void *)&val, ADF_DEC))
+ goto err;
+ }
+
+ val = i;
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ ADF_NUM_CY, (void *)&val, ADF_DEC))
+ goto err;
+
+ set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+ return adf_dev_start(accel_dev);
+err:
+ dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n");
+ return -EINVAL;
+}
+
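+/*
+ * Probe sequence: allocate the accel device on the device's NUMA node,
+ * read the fuses to derive the accelerator/engine masks, map the BARs,
+ * set up DMA masks, AER, transport rings, admin channel and arbiter,
+ * enable bundle/misc interrupts and finally start the device.
+ */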
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct adf_accel_dev *accel_dev;
+ struct adf_accel_pci *accel_pci_dev;
+ struct adf_hw_device_data *hw_data;
+ void __iomem *pmisc_bar_addr = NULL;
+ char name[ADF_DEVICE_NAME_LENGTH];
+ unsigned int i, bar_nr;
+ uint8_t node;
+ int ret;
+
+ switch (ent->device) {
+ case ADF_DH895XCC_PCI_DEVICE_ID:
+ break;
+ default:
+ dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+ return -ENODEV;
+ }
+
+ node = adf_get_dev_node_id(pdev);
+ accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, node);
+ if (!accel_dev)
+ return -ENOMEM;
+
+ accel_dev->numa_node = node;
+ INIT_LIST_HEAD(&accel_dev->crypto_list);
+
+ /* Add accel device to accel table.
+ * This should be called before adf_cleanup_accel is called */
+ if (adf_devmgr_add_dev(accel_dev)) {
+ dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+ kfree(accel_dev);
+ return -EFAULT;
+ }
+
+ accel_dev->owner = THIS_MODULE;
+ /* Allocate and configure device configuration structure */
+ hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, node);
+ if (!hw_data) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ accel_dev->hw_device = hw_data;
+ switch (ent->device) {
+ case ADF_DH895XCC_PCI_DEVICE_ID:
+ adf_init_hw_data_dh895xcc(accel_dev->hw_device);
+ break;
+	default:
+		ret = -ENODEV;
+		goto out_err;
+ }
+ accel_pci_dev = &accel_dev->accel_pci_dev;
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+ pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET,
+ &hw_data->fuses);
+
+ /* Get Accelerators and Accelerators Engines masks */
+ hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
+ hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
+ accel_pci_dev->sku = hw_data->get_sku(hw_data);
+ accel_pci_dev->pci_dev = pdev;
+ /* If the device has no acceleration engines then ignore it. */
+ if (!hw_data->accel_mask || !hw_data->ae_mask ||
+ ((~hw_data->ae_mask) & 0x01)) {
+ dev_err(&pdev->dev, "No acceleration units found");
+ ret = -EFAULT;
+ goto out_err;
+ }
+
+ /* Create dev top level debugfs entry */
+ snprintf(name, sizeof(name), "%s%s_dev%d", ADF_DEVICE_NAME_PREFIX,
+ hw_data->dev_class->name, hw_data->instance_id);
+ accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+ if (!accel_dev->debugfs_dir) {
+ dev_err(&pdev->dev, "Could not create debugfs dir\n");
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ /* Create device configuration table */
+ ret = adf_cfg_dev_add(accel_dev);
+ if (ret)
+ goto out_err;
+
+ /* enable PCI device */
+ if (pci_enable_device(pdev)) {
+ ret = -EFAULT;
+ goto out_err;
+ }
+
+ /* set dma identifier */
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+ dev_err(&pdev->dev, "No usable DMA configuration\n");
+ ret = -EFAULT;
+ goto out_err;
+ } else {
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ }
+
+ } else {
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ }
+
+ if (pci_request_regions(pdev, adf_driver_name)) {
+ ret = -EFAULT;
+ goto out_err;
+ }
+
+ /* Read accelerator capabilities mask */
+ pci_read_config_dword(pdev, ADF_DH895XCC_LEGFUSE_OFFSET,
+ &hw_data->accel_capabilities_mask);
+
+ /* Find and map all the device's BARS */
+ for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+ bar_nr = i * 2;
+ bar->base_addr = pci_resource_start(pdev, bar_nr);
+ if (!bar->base_addr)
+ break;
+ bar->size = pci_resource_len(pdev, bar_nr);
+ bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+ if (!bar->virt_addr) {
+ dev_err(&pdev->dev, "Failed to map BAR %d\n", i);
+ ret = -EFAULT;
+ goto out_err;
+ }
+ if (i == ADF_DH895XCC_PMISC_BAR)
+ pmisc_bar_addr = bar->virt_addr;
+ }
+ pci_set_master(pdev);
+
+ if (adf_enable_aer(accel_dev, &adf_driver)) {
+ dev_err(&pdev->dev, "Failed to enable aer\n");
+ ret = -EFAULT;
+ goto out_err;
+ }
+
+ if (adf_init_etr_data(accel_dev)) {
+ dev_err(&pdev->dev, "Failed initialize etr\n");
+ ret = -EFAULT;
+ goto out_err;
+ }
+
+ if (adf_init_admin_comms(accel_dev)) {
+ dev_err(&pdev->dev, "Failed initialize admin comms\n");
+ ret = -EFAULT;
+ goto out_err;
+ }
+
+ if (adf_init_arb(accel_dev)) {
+ dev_err(&pdev->dev, "Failed initialize hw arbiter\n");
+ ret = -EFAULT;
+ goto out_err;
+ }
+ if (pci_save_state(pdev)) {
+ dev_err(&pdev->dev, "Failed to save pci state\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ /* Enable bundle and misc interrupts */
+ ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET,
+ ADF_DH895XCC_SMIA0_MASK);
+ ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET,
+ ADF_DH895XCC_SMIA1_MASK);
+
+ ret = qat_dev_start(accel_dev);
+ if (ret) {
+ adf_dev_stop(accel_dev);
+ goto out_err;
+ }
+
+ return 0;
+out_err:
+ adf_cleanup_accel(accel_dev);
+ return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ if (!accel_dev) {
+ pr_err("QAT: Driver removal failed\n");
+ return;
+ }
+ if (adf_dev_stop(accel_dev))
+ dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
+ adf_disable_aer(accel_dev);
+ adf_cleanup_accel(accel_dev);
+}
+
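+/* Module init: pull in the shared intel_qat module, register the admin
+ * service handler, then register the PCI driver. */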
+static int __init adfdrv_init(void)
+{
+ request_module("intel_qat");
+ if (qat_admin_register())
+ return -EFAULT;
+
+ if (pci_register_driver(&adf_driver)) {
+ pr_err("QAT: Driver initialization failed\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+ pci_unregister_driver(&adf_driver);
+ qat_admin_unregister();
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE("qat_895xcc.bin");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.h b/drivers/crypto/qat/qat_dh895xcc/adf_drv.h
new file mode 100644
index 000000000000..a2fbb6ce75cd
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.h
@@ -0,0 +1,67 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_DH895x_DRV_H_
+#define ADF_DH895x_DRV_H_
+#include <adf_accel_devices.h>
+#include <adf_transport.h>
+
+void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
+int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
+void adf_isr_resource_free(struct adf_accel_dev *accel_dev);
+void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring);
+void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
+ uint32_t const **arb_map_config);
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
+int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev,
+ uint32_t ae, void *in, void *out);
+int qat_admin_register(void);
+int qat_admin_unregister(void);
+int adf_init_arb(struct adf_accel_dev *accel_dev);
+void adf_exit_arb(struct adf_accel_dev *accel_dev);
+#endif
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c b/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c
new file mode 100644
index 000000000000..1864bdb36f8f
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c
@@ -0,0 +1,159 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include <adf_transport_internal.h>
+#include "adf_drv.h"
+
+#define ADF_ARB_NUM 4
+#define ADF_ARB_REQ_RING_NUM 8
+#define ADF_ARB_REG_SIZE 0x4
+#define ADF_ARB_WTR_SIZE 0x20
+#define ADF_ARB_OFFSET 0x30000
+#define ADF_ARB_REG_SLOT 0x1000
+#define ADF_ARB_WTR_OFFSET 0x010
+#define ADF_ARB_RO_EN_OFFSET 0x090
+#define ADF_ARB_WQCFG_OFFSET 0x100
+#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180
+#define ADF_ARB_WRK_2_SER_MAP 10
+#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
+
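+/*
+ * CSR access helpers for the service arbiter block: the SARCONFIG, weight
+ * table, response ordering, work-queue config and worker-to-service map
+ * registers all live at fixed offsets from ADF_ARB_OFFSET and are indexed
+ * in ADF_ARB_REG_SIZE steps; RINGSRVARBEN is a per-bank register spaced
+ * ADF_ARB_REG_SLOT apart.
+ */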
+#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
+ ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
+ (ADF_ARB_REG_SLOT * index), value)
+
+#define WRITE_CSR_ARB_RESPORDERING(csr_addr, index, value) \
+ ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+ ADF_ARB_RO_EN_OFFSET) + (ADF_ARB_REG_SIZE * index), value)
+
+#define WRITE_CSR_ARB_WEIGHT(csr_addr, arb, index, value) \
+ ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+ ADF_ARB_WTR_OFFSET) + (ADF_ARB_WTR_SIZE * arb) + \
+ (ADF_ARB_REG_SIZE * index), value)
+
+#define WRITE_CSR_ARB_SARCONFIG(csr_addr, index, value) \
+ ADF_CSR_WR(csr_addr, ADF_ARB_OFFSET + \
+ (ADF_ARB_REG_SIZE * index), value)
+
+#define WRITE_CSR_ARB_WRK_2_SER_MAP(csr_addr, index, value) \
+ ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+ ADF_ARB_WRK_2_SER_MAP_OFFSET) + \
+ (ADF_ARB_REG_SIZE * index), value)
+
+#define WRITE_CSR_ARB_WQCFG(csr_addr, index, value) \
+ ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+ ADF_ARB_WQCFG_OFFSET) + (ADF_ARB_REG_SIZE * index), value)
+
+int adf_init_arb(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
+ uint32_t arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1;
+ uint32_t arb, i;
+ const uint32_t *thd_2_arb_cfg;
+
+	/* Service arbiters are configured for 32-byte responses with
+	 * ring flow control checking enabled. */
+ for (arb = 0; arb < ADF_ARB_NUM; arb++)
+ WRITE_CSR_ARB_SARCONFIG(csr, arb, arb_cfg);
+
+ /* Setup service weighting */
+ for (arb = 0; arb < ADF_ARB_NUM; arb++)
+ for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++)
+ WRITE_CSR_ARB_WEIGHT(csr, arb, i, 0xFFFFFFFF);
+
+ /* Setup ring response ordering */
+ for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++)
+ WRITE_CSR_ARB_RESPORDERING(csr, i, 0xFFFFFFFF);
+
+ /* Setup worker queue registers */
+ for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+ WRITE_CSR_ARB_WQCFG(csr, i, i);
+
+ /* Map worker threads to service arbiters */
+ adf_get_arbiter_mapping(accel_dev, &thd_2_arb_cfg);
+
+ if (!thd_2_arb_cfg)
+ return -EFAULT;
+
+ for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+ WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, *(thd_2_arb_cfg + i));
+
+ return 0;
+}
+
+void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring)
+{
+ WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr,
+ ring->bank->bank_number,
+ ring->bank->ring_mask & 0xFF);
+}
+
+void adf_exit_arb(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *csr;
+ unsigned int i;
+
+ if (!accel_dev->transport)
+ return;
+
+ csr = accel_dev->transport->banks[0].csr_addr;
+
+ /* Reset arbiter configuration */
+ for (i = 0; i < ADF_ARB_NUM; i++)
+ WRITE_CSR_ARB_SARCONFIG(csr, i, 0);
+
+ /* Shutdown work queue */
+ for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+ WRITE_CSR_ARB_WQCFG(csr, i, 0);
+
+	/* Unmap worker threads from service arbiters */
+ for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+ WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, 0);
+
+ /* Disable arbitration on all rings */
+ for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
+ WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0);
+}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
new file mode 100644
index 000000000000..d4172dedf775
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
@@ -0,0 +1,266 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include <adf_cfg_strings.h>
+#include <adf_cfg_common.h>
+#include <adf_transport_access_macros.h>
+#include <adf_transport_internal.h>
+#include "adf_drv.h"
+
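+/*
+ * Interrupt layout: one MSI-X vector per ring bank plus one extra vector
+ * for AE cluster events. Bank vectors are spread across online CPUs via
+ * affinity hints when the IRQs are requested.
+ */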
+static int adf_enable_msix(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ uint32_t msix_num_entries = hw_data->num_banks + 1;
+ int i;
+
+ for (i = 0; i < msix_num_entries; i++)
+ pci_dev_info->msix_entries.entries[i].entry = i;
+
+ if (pci_enable_msix(pci_dev_info->pci_dev,
+ pci_dev_info->msix_entries.entries,
+ msix_num_entries)) {
+ pr_err("QAT: Failed to enable MSIX IRQ\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
+{
+ pci_disable_msix(pci_dev_info->pci_dev);
+}
+
+static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
+{
+ struct adf_etr_bank_data *bank = bank_ptr;
+
+ WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, 0);
+ tasklet_hi_schedule(&bank->resp_hanlder);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
+{
+ struct adf_accel_dev *accel_dev = dev_ptr;
+
+ pr_info("QAT: qat_dev%d spurious AE interrupt\n", accel_dev->accel_id);
+ return IRQ_HANDLED;
+}
+
+static int adf_request_irqs(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
+ struct adf_etr_data *etr_data = accel_dev->transport;
+ int ret, i;
+ char *name;
+
+ /* Request msix irq for all banks */
+ for (i = 0; i < hw_data->num_banks; i++) {
+ struct adf_etr_bank_data *bank = &etr_data->banks[i];
+ unsigned int cpu, cpus = num_online_cpus();
+
+ name = *(pci_dev_info->msix_entries.names + i);
+ snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
+ "qat%d-bundle%d", accel_dev->accel_id, i);
+ ret = request_irq(msixe[i].vector,
+ adf_msix_isr_bundle, 0, name, bank);
+ if (ret) {
+ pr_err("QAT: failed to enable irq %d for %s\n",
+ msixe[i].vector, name);
+ return ret;
+ }
+
+ cpu = ((accel_dev->accel_id * hw_data->num_banks) + i) % cpus;
+ irq_set_affinity_hint(msixe[i].vector, get_cpu_mask(cpu));
+ }
+
+ /* Request msix irq for AE */
+ name = *(pci_dev_info->msix_entries.names + i);
+ snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
+ "qat%d-ae-cluster", accel_dev->accel_id);
+ ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev);
+ if (ret) {
+ pr_err("QAT: failed to enable irq %d, for %s\n",
+ msixe[i].vector, name);
+ return ret;
+ }
+ return ret;
+}
+
+static void adf_free_irqs(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
+ struct adf_etr_data *etr_data = accel_dev->transport;
+ int i;
+
+ for (i = 0; i < hw_data->num_banks; i++) {
+ irq_set_affinity_hint(msixe[i].vector, NULL);
+ free_irq(msixe[i].vector, &etr_data->banks[i]);
+ }
+ irq_set_affinity_hint(msixe[i].vector, NULL);
+ free_irq(msixe[i].vector, accel_dev);
+}
+
+static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
+{
+ int i;
+ char **names;
+ struct msix_entry *entries;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ uint32_t msix_num_entries = hw_data->num_banks + 1;
+
+ entries = kzalloc_node(msix_num_entries * sizeof(*entries),
+ GFP_KERNEL, accel_dev->numa_node);
+ if (!entries)
+ return -ENOMEM;
+
+ names = kcalloc(msix_num_entries, sizeof(char *), GFP_KERNEL);
+ if (!names) {
+ kfree(entries);
+ return -ENOMEM;
+ }
+ for (i = 0; i < msix_num_entries; i++) {
+ *(names + i) = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
+ if (!(*(names + i)))
+ goto err;
+ }
+ accel_dev->accel_pci_dev.msix_entries.entries = entries;
+ accel_dev->accel_pci_dev.msix_entries.names = names;
+ return 0;
+err:
+ for (i = 0; i < msix_num_entries; i++) {
+ if (*(names + i))
+ kfree(*(names + i));
+ }
+ kfree(entries);
+ kfree(names);
+ return -ENOMEM;
+}
+
+static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ uint32_t msix_num_entries = hw_data->num_banks + 1;
+ char **names = accel_dev->accel_pci_dev.msix_entries.names;
+ int i;
+
+ kfree(accel_dev->accel_pci_dev.msix_entries.entries);
+ for (i = 0; i < msix_num_entries; i++) {
+ if (*(names + i))
+ kfree(*(names + i));
+ }
+ kfree(names);
+}
+
+static int adf_setup_bh(struct adf_accel_dev *accel_dev)
+{
+ struct adf_etr_data *priv_data = accel_dev->transport;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ int i;
+
+ for (i = 0; i < hw_data->num_banks; i++)
+ tasklet_init(&priv_data->banks[i].resp_hanlder,
+ adf_response_handler,
+ (unsigned long)&priv_data->banks[i]);
+ return 0;
+}
+
+static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
+{
+ struct adf_etr_data *priv_data = accel_dev->transport;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ int i;
+
+ for (i = 0; i < hw_data->num_banks; i++) {
+ tasklet_disable(&priv_data->banks[i].resp_hanlder);
+ tasklet_kill(&priv_data->banks[i].resp_hanlder);
+ }
+}
+
+void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
+{
+ adf_free_irqs(accel_dev);
+ adf_cleanup_bh(accel_dev);
+ adf_disable_msix(&accel_dev->accel_pci_dev);
+ adf_isr_free_msix_entry_table(accel_dev);
+}
+
+int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
+{
+ int ret;
+
+ ret = adf_isr_alloc_msix_entry_table(accel_dev);
+ if (ret)
+ return ret;
+ if (adf_enable_msix(accel_dev))
+ goto err_out;
+
+ if (adf_setup_bh(accel_dev))
+ goto err_out;
+
+ if (adf_request_irqs(accel_dev))
+ goto err_out;
+
+ return 0;
+err_out:
+ adf_isr_resource_free(accel_dev);
+ return -EFAULT;
+}
diff --git a/drivers/crypto/qat/qat_dh895xcc/qat_admin.c b/drivers/crypto/qat/qat_dh895xcc/qat_admin.c
new file mode 100644
index 000000000000..55b7a8e48bad
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xcc/qat_admin.c
@@ -0,0 +1,107 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <icp_qat_fw_init_admin.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include "adf_drv.h"
+
+static struct service_hndl qat_admin;
+
+static int qat_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd)
+{
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ struct icp_qat_fw_init_admin_req req;
+ struct icp_qat_fw_init_admin_resp resp;
+ int i;
+
+ memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req));
+ req.init_admin_cmd_id = cmd;
+ for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
+ memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp));
+ if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) ||
+ resp.init_resp_hdr.status)
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int qat_admin_start(struct adf_accel_dev *accel_dev)
+{
+ return qat_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME);
+}
+
+static int qat_admin_event_handler(struct adf_accel_dev *accel_dev,
+ enum adf_event event)
+{
+ int ret;
+
+ switch (event) {
+ case ADF_EVENT_START:
+ ret = qat_admin_start(accel_dev);
+ break;
+ case ADF_EVENT_STOP:
+ case ADF_EVENT_INIT:
+ case ADF_EVENT_SHUTDOWN:
+ default:
+ ret = 0;
+ }
+ return ret;
+}
+
+int qat_admin_register(void)
+{
+ memset(&qat_admin, 0, sizeof(struct service_hndl));
+ qat_admin.event_hld = qat_admin_event_handler;
+ qat_admin.name = "qat_admin";
+ qat_admin.admin = 1;
+ return adf_service_register(&qat_admin);
+}
+
+int qat_admin_unregister(void)
+{
+ return adf_service_unregister(&qat_admin);
+}
diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile
new file mode 100644
index 000000000000..348dc3173afa
--- /dev/null
+++ b/drivers/crypto/qce/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypto.o
+qcrypto-objs := core.o \
+ common.o \
+ dma.o \
+ sha.o \
+ ablkcipher.o
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
new file mode 100644
index 000000000000..ad592de475a4
--- /dev/null
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/des.h>
+
+#include "cipher.h"
+
+static LIST_HEAD(ablkcipher_algs);
+
+static void qce_ablkcipher_done(void *data)
+{
+ struct crypto_async_request *async_req = data;
+ struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+ struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+ struct qce_device *qce = tmpl->qce;
+ enum dma_data_direction dir_src, dir_dst;
+ u32 status;
+ int error;
+ bool diff_dst;
+
+	diff_dst = (req->src != req->dst);
+ dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+ dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
+
+ error = qce_dma_terminate_all(&qce->dma);
+ if (error)
+ dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n",
+ error);
+
+ if (diff_dst)
+ qce_unmapsg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src,
+ rctx->dst_chained);
+ qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
+ rctx->dst_chained);
+
+ sg_free_table(&rctx->dst_tbl);
+
+ error = qce_check_status(qce, &status);
+ if (error < 0)
+ dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status);
+
+ qce->async_req_done(tmpl->qce, error);
+}
+
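+/*
+ * Prepare and submit one request: build the destination sg table with an
+ * extra entry for the engine's result buffer, DMA-map source/destination,
+ * queue the DMA descriptors, then program the crypto registers and start
+ * the transfer.
+ */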
+static int
+qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
+{
+ struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+ struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+ struct qce_device *qce = tmpl->qce;
+ enum dma_data_direction dir_src, dir_dst;
+ struct scatterlist *sg;
+ bool diff_dst;
+ gfp_t gfp;
+ int ret;
+
+ rctx->iv = req->info;
+ rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ rctx->cryptlen = req->nbytes;
+
+	diff_dst = (req->src != req->dst);
+ dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+ dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
+
+ rctx->src_nents = qce_countsg(req->src, req->nbytes,
+ &rctx->src_chained);
+ if (diff_dst) {
+ rctx->dst_nents = qce_countsg(req->dst, req->nbytes,
+ &rctx->dst_chained);
+ } else {
+ rctx->dst_nents = rctx->src_nents;
+ rctx->dst_chained = rctx->src_chained;
+ }
+
+ rctx->dst_nents += 1;
+
+ gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+
+ ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
+ if (ret)
+ return ret;
+
+ sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
+
+ sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto error_free;
+ }
+
+ sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto error_free;
+ }
+
+ sg_mark_end(sg);
+ rctx->dst_sg = rctx->dst_tbl.sgl;
+
+ ret = qce_mapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
+ rctx->dst_chained);
+ if (ret < 0)
+ goto error_free;
+
+ if (diff_dst) {
+ ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, dir_src,
+ rctx->src_chained);
+ if (ret < 0)
+ goto error_unmap_dst;
+ rctx->src_sg = req->src;
+ } else {
+ rctx->src_sg = rctx->dst_sg;
+ }
+
+ ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
+ rctx->dst_sg, rctx->dst_nents,
+ qce_ablkcipher_done, async_req);
+ if (ret)
+ goto error_unmap_src;
+
+ qce_dma_issue_pending(&qce->dma);
+
+ ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
+ if (ret)
+ goto error_terminate;
+
+ return 0;
+
+error_terminate:
+ qce_dma_terminate_all(&qce->dma);
+error_unmap_src:
+ if (diff_dst)
+ qce_unmapsg(qce->dev, req->src, rctx->src_nents, dir_src,
+ rctx->src_chained);
+error_unmap_dst:
+ qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
+ rctx->dst_chained);
+error_free:
+ sg_free_table(&rctx->dst_tbl);
+ return ret;
+}
+
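+/*
+ * The engine handles 128- and 256-bit AES keys directly; other AES key
+ * sizes (e.g. 192-bit) are routed to the software fallback tfm. DES keys
+ * are additionally checked for weak keys when the tfm requests it.
+ */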
+static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk);
+ struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ unsigned long flags = to_cipher_tmpl(tfm)->alg_flags;
+ int ret;
+
+ if (!key || !keylen)
+ return -EINVAL;
+
+ if (IS_AES(flags)) {
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_256:
+ break;
+ default:
+ goto fallback;
+ }
+ } else if (IS_DES(flags)) {
+ u32 tmp[DES_EXPKEY_WORDS];
+
+ ret = des_ekey(tmp, key);
+ if (!ret && crypto_ablkcipher_get_flags(ablk) &
+ CRYPTO_TFM_REQ_WEAK_KEY)
+ goto weakkey;
+ }
+
+ ctx->enc_keylen = keylen;
+ memcpy(ctx->enc_key, key, keylen);
+ return 0;
+fallback:
+ ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
+ if (!ret)
+ ctx->enc_keylen = keylen;
+ return ret;
+weakkey:
+ crypto_ablkcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY);
+ return -EINVAL;
+}
+
+static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
+{
+ struct crypto_tfm *tfm =
+ crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+ struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
+ int ret;
+
+ rctx->flags = tmpl->alg_flags;
+ rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
+
+ if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
+ ctx->enc_keylen != AES_KEYSIZE_256) {
+ ablkcipher_request_set_tfm(req, ctx->fallback);
+ ret = encrypt ? crypto_ablkcipher_encrypt(req) :
+ crypto_ablkcipher_decrypt(req);
+ ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+ return ret;
+ }
+
+ return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
+}
+
+static int qce_ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+ return qce_ablkcipher_crypt(req, 1);
+}
+
+static int qce_ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+ return qce_ablkcipher_crypt(req, 0);
+}
+
+static int qce_ablkcipher_init(struct crypto_tfm *tfm)
+{
+ struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ memset(ctx, 0, sizeof(*ctx));
+ tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);
+
+ ctx->fallback = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm),
+ CRYPTO_ALG_TYPE_ABLKCIPHER,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fallback))
+ return PTR_ERR(ctx->fallback);
+
+ return 0;
+}
+
+static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
+{
+ struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_ablkcipher(ctx->fallback);
+}
+
+struct qce_ablkcipher_def {
+ unsigned long flags;
+ const char *name;
+ const char *drv_name;
+ unsigned int blocksize;
+ unsigned int ivsize;
+ unsigned int min_keysize;
+ unsigned int max_keysize;
+};
+
+static const struct qce_ablkcipher_def ablkcipher_def[] = {
+ {
+ .flags = QCE_ALG_AES | QCE_MODE_ECB,
+ .name = "ecb(aes)",
+ .drv_name = "ecb-aes-qce",
+ .blocksize = AES_BLOCK_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ },
+ {
+ .flags = QCE_ALG_AES | QCE_MODE_CBC,
+ .name = "cbc(aes)",
+ .drv_name = "cbc-aes-qce",
+ .blocksize = AES_BLOCK_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ },
+ {
+ .flags = QCE_ALG_AES | QCE_MODE_CTR,
+ .name = "ctr(aes)",
+ .drv_name = "ctr-aes-qce",
+ .blocksize = AES_BLOCK_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ },
+ {
+ .flags = QCE_ALG_AES | QCE_MODE_XTS,
+ .name = "xts(aes)",
+ .drv_name = "xts-aes-qce",
+ .blocksize = AES_BLOCK_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ },
+ {
+ .flags = QCE_ALG_DES | QCE_MODE_ECB,
+ .name = "ecb(des)",
+ .drv_name = "ecb-des-qce",
+ .blocksize = DES_BLOCK_SIZE,
+ .ivsize = 0,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ },
+ {
+ .flags = QCE_ALG_DES | QCE_MODE_CBC,
+ .name = "cbc(des)",
+ .drv_name = "cbc-des-qce",
+ .blocksize = DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ },
+ {
+ .flags = QCE_ALG_3DES | QCE_MODE_ECB,
+ .name = "ecb(des3_ede)",
+ .drv_name = "ecb-3des-qce",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .ivsize = 0,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ },
+ {
+ .flags = QCE_ALG_3DES | QCE_MODE_CBC,
+ .name = "cbc(des3_ede)",
+ .drv_name = "cbc-3des-qce",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ },
+};
+
+static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
+ struct qce_device *qce)
+{
+ struct qce_alg_template *tmpl;
+ struct crypto_alg *alg;
+ int ret;
+
+ tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
+ if (!tmpl)
+ return -ENOMEM;
+
+ alg = &tmpl->alg.crypto;
+
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ def->drv_name);
+
+ alg->cra_blocksize = def->blocksize;
+ alg->cra_ablkcipher.ivsize = def->ivsize;
+ alg->cra_ablkcipher.min_keysize = def->min_keysize;
+ alg->cra_ablkcipher.max_keysize = def->max_keysize;
+ alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey;
+ alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
+ alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;
+
+ alg->cra_priority = 300;
+ alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK;
+ alg->cra_ctxsize = sizeof(struct qce_cipher_ctx);
+ alg->cra_alignmask = 0;
+ alg->cra_type = &crypto_ablkcipher_type;
+ alg->cra_module = THIS_MODULE;
+ alg->cra_init = qce_ablkcipher_init;
+ alg->cra_exit = qce_ablkcipher_exit;
+ INIT_LIST_HEAD(&alg->cra_list);
+
+ INIT_LIST_HEAD(&tmpl->entry);
+ tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER;
+ tmpl->alg_flags = def->flags;
+ tmpl->qce = qce;
+
+ ret = crypto_register_alg(alg);
+ if (ret) {
+ kfree(tmpl);
+ dev_err(qce->dev, "%s registration failed\n", alg->cra_name);
+ return ret;
+ }
+
+ list_add_tail(&tmpl->entry, &ablkcipher_algs);
+ dev_dbg(qce->dev, "%s is registered\n", alg->cra_name);
+ return 0;
+}
+
+static void qce_ablkcipher_unregister(struct qce_device *qce)
+{
+ struct qce_alg_template *tmpl, *n;
+
+ list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) {
+ crypto_unregister_alg(&tmpl->alg.crypto);
+ list_del(&tmpl->entry);
+ kfree(tmpl);
+ }
+}
+
+static int qce_ablkcipher_register(struct qce_device *qce)
+{
+ int ret, i;
+
+ for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) {
+ ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ qce_ablkcipher_unregister(qce);
+ return ret;
+}
+
+const struct qce_algo_ops ablkcipher_ops = {
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .register_algs = qce_ablkcipher_register,
+ .unregister_algs = qce_ablkcipher_unregister,
+ .async_req_handle = qce_ablkcipher_async_req_handle,
+};
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
new file mode 100644
index 000000000000..d5757cfcda2d
--- /dev/null
+++ b/drivers/crypto/qce/cipher.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CIPHER_H_
+#define _CIPHER_H_
+
+#include "common.h"
+#include "core.h"
+
+#define QCE_MAX_KEY_SIZE 64
+
+struct qce_cipher_ctx {
+ u8 enc_key[QCE_MAX_KEY_SIZE];
+ unsigned int enc_keylen;
+ struct crypto_ablkcipher *fallback;
+};
+
+/**
+ * struct qce_cipher_reqctx - holds private cipher objects per request
+ * @flags: operation flags
+ * @iv: pointer to the IV
+ * @ivsize: IV size
+ * @src_nents: source entries
+ * @dst_nents: destination entries
+ * @src_chained: is source chained
+ * @dst_chained: is destination chained
+ * @result_sg: scatterlist used for result buffer
+ * @dst_tbl: destination sg table
+ * @dst_sg: destination sg pointer table beginning
+ * @src_tbl: source sg table
+ * @src_sg: source sg pointer table beginning
+ * @cryptlen: crypto length
+ */
+struct qce_cipher_reqctx {
+ unsigned long flags;
+ u8 *iv;
+ unsigned int ivsize;
+ int src_nents;
+ int dst_nents;
+ bool src_chained;
+ bool dst_chained;
+ struct scatterlist result_sg;
+ struct sg_table dst_tbl;
+ struct scatterlist *dst_sg;
+ struct sg_table src_tbl;
+ struct scatterlist *src_sg;
+ unsigned int cryptlen;
+};
+
+static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ return container_of(alg, struct qce_alg_template, alg.crypto);
+}
+
+extern const struct qce_algo_ops ablkcipher_ops;
+
+#endif /* _CIPHER_H_ */
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
new file mode 100644
index 000000000000..1fb5fde7fc03
--- /dev/null
+++ b/drivers/crypto/qce/common.c
@@ -0,0 +1,438 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+
+#include "cipher.h"
+#include "common.h"
+#include "core.h"
+#include "regs-v5.h"
+#include "sha.h"
+
+#define QCE_SECTOR_SIZE 512
+
+static inline u32 qce_read(struct qce_device *qce, u32 offset)
+{
+ return readl(qce->base + offset);
+}
+
+static inline void qce_write(struct qce_device *qce, u32 offset, u32 val)
+{
+ writel(val, qce->base + offset);
+}
+
+static inline void qce_write_array(struct qce_device *qce, u32 offset,
+ const u32 *val, unsigned int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ qce_write(qce, offset + i * sizeof(u32), val[i]);
+}
+
+static inline void
+qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ qce_write(qce, offset + i * sizeof(u32), 0);
+}
+
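+/* Translate driver flags into the encryption segment config value:
+ * algorithm, key size and block mode fields are OR'ed together; an
+ * unknown mode yields ~0. */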
+static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
+{
+ u32 cfg = 0;
+
+ if (IS_AES(flags)) {
+ if (aes_key_size == AES_KEYSIZE_128)
+ cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
+ else if (aes_key_size == AES_KEYSIZE_256)
+ cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
+ }
+
+ if (IS_AES(flags))
+ cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
+ else if (IS_DES(flags) || IS_3DES(flags))
+ cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
+
+ if (IS_DES(flags))
+ cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
+
+ if (IS_3DES(flags))
+ cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
+
+ switch (flags & QCE_MODE_MASK) {
+ case QCE_MODE_ECB:
+ cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_CBC:
+ cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_CTR:
+ cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_XTS:
+ cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_CCM:
+ cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
+ cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
+ break;
+ default:
+ return ~0;
+ }
+
+ return cfg;
+}
+
+static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
+{
+ u32 cfg = 0;
+
+ if (IS_AES(flags) && (IS_CCM(flags) || IS_CMAC(flags)))
+ cfg |= AUTH_ALG_AES << AUTH_ALG_SHIFT;
+ else
+ cfg |= AUTH_ALG_SHA << AUTH_ALG_SHIFT;
+
+ if (IS_CCM(flags) || IS_CMAC(flags)) {
+ if (key_size == AES_KEYSIZE_128)
+ cfg |= AUTH_KEY_SZ_AES128 << AUTH_KEY_SIZE_SHIFT;
+ else if (key_size == AES_KEYSIZE_256)
+ cfg |= AUTH_KEY_SZ_AES256 << AUTH_KEY_SIZE_SHIFT;
+ }
+
+ if (IS_SHA1(flags) || IS_SHA1_HMAC(flags))
+ cfg |= AUTH_SIZE_SHA1 << AUTH_SIZE_SHIFT;
+ else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags))
+ cfg |= AUTH_SIZE_SHA256 << AUTH_SIZE_SHIFT;
+ else if (IS_CMAC(flags))
+ cfg |= AUTH_SIZE_ENUM_16_BYTES << AUTH_SIZE_SHIFT;
+
+ if (IS_SHA1(flags) || IS_SHA256(flags))
+ cfg |= AUTH_MODE_HASH << AUTH_MODE_SHIFT;
+ else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags) ||
+ IS_CBC(flags) || IS_CTR(flags))
+ cfg |= AUTH_MODE_HMAC << AUTH_MODE_SHIFT;
+ else if (IS_AES(flags) && IS_CCM(flags))
+ cfg |= AUTH_MODE_CCM << AUTH_MODE_SHIFT;
+ else if (IS_AES(flags) && IS_CMAC(flags))
+ cfg |= AUTH_MODE_CMAC << AUTH_MODE_SHIFT;
+
+ if (IS_SHA(flags) || IS_SHA_HMAC(flags))
+ cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;
+
+ if (IS_CCM(flags))
+ cfg |= QCE_MAX_NONCE_WORDS << AUTH_NONCE_NUM_WORDS_SHIFT;
+
+ if (IS_CBC(flags) || IS_CTR(flags) || IS_CCM(flags) ||
+ IS_CMAC(flags))
+ cfg |= BIT(AUTH_LAST_SHIFT) | BIT(AUTH_FIRST_SHIFT);
+
+ return cfg;
+}
+
+static u32 qce_config_reg(struct qce_device *qce, int little)
+{
+ u32 beats = (qce->burst_size >> 3) - 1;
+ u32 pipe_pair = qce->pipe_pair_id;
+ u32 config;
+
+ config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
+ config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
+ BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
+ config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
+ config &= ~HIGH_SPD_EN_N_SHIFT;
+
+ if (little)
+ config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
+
+ return config;
+}
+
+void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
+{
+ __be32 *d = dst;
+ const u8 *s = src;
+ unsigned int n;
+
+ n = len / sizeof(u32);
+ for (; n > 0; n--) {
+ *d = cpu_to_be32p((const __u32 *) s);
+ s += sizeof(__u32);
+ d++;
+ }
+}
+
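+/* The XTS tweak is byte-reversed and zero-padded to the AES block size
+ * before the usual CPU-to-BE32 conversion. */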
+static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
+{
+ u8 swap[QCE_AES_IV_LENGTH];
+ u32 i, j;
+
+ if (ivsize > QCE_AES_IV_LENGTH)
+ return;
+
+ memset(swap, 0, QCE_AES_IV_LENGTH);
+
+ for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
+ i < QCE_AES_IV_LENGTH; i++, j--)
+ swap[i] = src[j];
+
+ qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
+}
+
+static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
+ unsigned int enckeylen, unsigned int cryptlen)
+{
+ u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
+ unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
+ unsigned int xtsdusize;
+
+ qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
+ enckeylen / 2);
+ qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
+
+ /* xts du size 512B */
+ xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
+ qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
+}
+
+static void qce_setup_config(struct qce_device *qce)
+{
+ u32 config;
+
+ /* get big endianness */
+ config = qce_config_reg(qce, 0);
+
+ /* clear status */
+ qce_write(qce, REG_STATUS, 0);
+ qce_write(qce, REG_CONFIG, config);
+}
+
+static inline void qce_crypto_go(struct qce_device *qce)
+{
+ qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
+}
+
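+/*
+ * Program the hash path: load the HMAC/CMAC key if present, seed the
+ * digest registers from the request context (copied as-is on the first
+ * block, byte-swapped otherwise), restore the byte counters, and set the
+ * first/last block bits in AUTH_SEG_CFG before triggering GOPROC.
+ */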
+static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
+ u32 totallen, u32 offset)
+{
+ struct ahash_request *req = ahash_request_cast(async_req);
+ struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm);
+ struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
+ struct qce_device *qce = tmpl->qce;
+ unsigned int digestsize = crypto_ahash_digestsize(ahash);
+ unsigned int blocksize = crypto_tfm_alg_blocksize(async_req->tfm);
+ __be32 auth[SHA256_DIGEST_SIZE / sizeof(__be32)] = {0};
+ __be32 mackey[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0};
+ u32 auth_cfg = 0, config;
+ unsigned int iv_words;
+
+ /* if not the last, the size has to be on the block boundary */
+ if (!rctx->last_blk && req->nbytes % blocksize)
+ return -EINVAL;
+
+ qce_setup_config(qce);
+
+ if (IS_CMAC(rctx->flags)) {
+ qce_write(qce, REG_AUTH_SEG_CFG, 0);
+ qce_write(qce, REG_ENCR_SEG_CFG, 0);
+ qce_write(qce, REG_ENCR_SEG_SIZE, 0);
+ qce_clear_array(qce, REG_AUTH_IV0, 16);
+ qce_clear_array(qce, REG_AUTH_KEY0, 16);
+ qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
+
+ auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen);
+ }
+
+ if (IS_SHA_HMAC(rctx->flags) || IS_CMAC(rctx->flags)) {
+ u32 authkey_words = rctx->authklen / sizeof(u32);
+
+ qce_cpu_to_be32p_array(mackey, rctx->authkey, rctx->authklen);
+ qce_write_array(qce, REG_AUTH_KEY0, (u32 *)mackey,
+ authkey_words);
+ }
+
+ if (IS_CMAC(rctx->flags))
+ goto go_proc;
+
+ if (rctx->first_blk)
+ memcpy(auth, rctx->digest, digestsize);
+ else
+ qce_cpu_to_be32p_array(auth, rctx->digest, digestsize);
+
+ iv_words = (IS_SHA1(rctx->flags) || IS_SHA1_HMAC(rctx->flags)) ? 5 : 8;
+ qce_write_array(qce, REG_AUTH_IV0, (u32 *)auth, iv_words);
+
+ if (rctx->first_blk)
+ qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
+ else
+ qce_write_array(qce, REG_AUTH_BYTECNT0,
+ (u32 *)rctx->byte_count, 2);
+
+ auth_cfg = qce_auth_cfg(rctx->flags, 0);
+
+ if (rctx->last_blk)
+ auth_cfg |= BIT(AUTH_LAST_SHIFT);
+ else
+ auth_cfg &= ~BIT(AUTH_LAST_SHIFT);
+
+ if (rctx->first_blk)
+ auth_cfg |= BIT(AUTH_FIRST_SHIFT);
+ else
+ auth_cfg &= ~BIT(AUTH_FIRST_SHIFT);
+
+go_proc:
+ qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);
+ qce_write(qce, REG_AUTH_SEG_SIZE, req->nbytes);
+ qce_write(qce, REG_AUTH_SEG_START, 0);
+ qce_write(qce, REG_ENCR_SEG_CFG, 0);
+ qce_write(qce, REG_SEG_SIZE, req->nbytes);
+
+ /* get little endianness */
+ config = qce_config_reg(qce, 1);
+ qce_write(qce, REG_CONFIG, config);
+
+ qce_crypto_go(qce);
+
+ return 0;
+}
+
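+/*
+ * Program the cipher path: write the encryption key (half of it for XTS,
+ * which also gets a separate tweak key and DU size), load the IV/counter
+ * for non-ECB modes, set segment sizes and direction, then trigger GOPROC.
+ */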
+static int qce_setup_regs_ablkcipher(struct crypto_async_request *async_req,
+ u32 totallen, u32 offset)
+{
+ struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+ struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+ struct qce_device *qce = tmpl->qce;
+ __be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
+ __be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
+ unsigned int enckey_words, enciv_words;
+ unsigned int keylen;
+ u32 encr_cfg = 0, auth_cfg = 0, config;
+ unsigned int ivsize = rctx->ivsize;
+ unsigned long flags = rctx->flags;
+
+ qce_setup_config(qce);
+
+ if (IS_XTS(flags))
+ keylen = ctx->enc_keylen / 2;
+ else
+ keylen = ctx->enc_keylen;
+
+ qce_cpu_to_be32p_array(enckey, ctx->enc_key, keylen);
+ enckey_words = keylen / sizeof(u32);
+
+ qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);
+
+ encr_cfg = qce_encr_cfg(flags, keylen);
+
+ if (IS_DES(flags)) {
+ enciv_words = 2;
+ enckey_words = 2;
+ } else if (IS_3DES(flags)) {
+ enciv_words = 2;
+ enckey_words = 6;
+ } else if (IS_AES(flags)) {
+ if (IS_XTS(flags))
+ qce_xtskey(qce, ctx->enc_key, ctx->enc_keylen,
+ rctx->cryptlen);
+ enciv_words = 4;
+ } else {
+ return -EINVAL;
+ }
+
+ qce_write_array(qce, REG_ENCR_KEY0, (u32 *)enckey, enckey_words);
+
+ if (!IS_ECB(flags)) {
+ if (IS_XTS(flags))
+ qce_xts_swapiv(enciv, rctx->iv, ivsize);
+ else
+ qce_cpu_to_be32p_array(enciv, rctx->iv, ivsize);
+
+ qce_write_array(qce, REG_CNTR0_IV0, (u32 *)enciv, enciv_words);
+ }
+
+ if (IS_ENCRYPT(flags))
+ encr_cfg |= BIT(ENCODE_SHIFT);
+
+ qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
+ qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
+ qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff);
+
+ if (IS_CTR(flags)) {
+ qce_write(qce, REG_CNTR_MASK, ~0);
+ qce_write(qce, REG_CNTR_MASK0, ~0);
+ qce_write(qce, REG_CNTR_MASK1, ~0);
+ qce_write(qce, REG_CNTR_MASK2, ~0);
+ }
+
+ qce_write(qce, REG_SEG_SIZE, totallen);
+
+ /* get little endianness */
+ config = qce_config_reg(qce, 1);
+ qce_write(qce, REG_CONFIG, config);
+
+ qce_crypto_go(qce);
+
+ return 0;
+}
+
+int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
+ u32 offset)
+{
+ switch (type) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ return qce_setup_regs_ablkcipher(async_req, totallen, offset);
+ case CRYPTO_ALG_TYPE_AHASH:
+ return qce_setup_regs_ahash(async_req, totallen, offset);
+ default:
+ return -EINVAL;
+ }
+}
+
+#define STATUS_ERRORS \
+ (BIT(SW_ERR_SHIFT) | BIT(AXI_ERR_SHIFT) | BIT(HSD_ERR_SHIFT))
+
+int qce_check_status(struct qce_device *qce, u32 *status)
+{
+ int ret = 0;
+
+ *status = qce_read(qce, REG_STATUS);
+
+ /*
+ * Don't use result dump status. The operation may not be complete.
+	 * Instead, use the status we just read from the device. If result_status
+	 * from the result dump is ever needed, it must be byte-swapped first,
+	 * since we set the device to little endian.
+ */
+ if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT)))
+ ret = -ENXIO;
+
+ return ret;
+}
+
+void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step)
+{
+ u32 val;
+
+ val = qce_read(qce, REG_VERSION);
+ *major = (val & CORE_MAJOR_REV_MASK) >> CORE_MAJOR_REV_SHIFT;
+ *minor = (val & CORE_MINOR_REV_MASK) >> CORE_MINOR_REV_SHIFT;
+ *step = (val & CORE_STEP_REV_MASK) >> CORE_STEP_REV_SHIFT;
+}
diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h
new file mode 100644
index 000000000000..a4addd4f7d6c
--- /dev/null
+++ b/drivers/crypto/qce/common.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _COMMON_H_
+#define _COMMON_H_
+
+#include <linux/crypto.h>
+#include <linux/types.h>
+#include <crypto/aes.h>
+#include <crypto/hash.h>
+
+/* key size in bytes */
+#define QCE_SHA_HMAC_KEY_SIZE 64
+#define QCE_MAX_CIPHER_KEY_SIZE AES_KEYSIZE_256
+
+/* IV length in bytes */
+#define QCE_AES_IV_LENGTH AES_BLOCK_SIZE
+/* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
+#define QCE_MAX_IV_SIZE AES_BLOCK_SIZE
+
+/* maximum nonce bytes */
+#define QCE_MAX_NONCE 16
+#define QCE_MAX_NONCE_WORDS (QCE_MAX_NONCE / sizeof(u32))
+
+/* burst size alignment requirement */
+#define QCE_MAX_ALIGN_SIZE 64
+
+/* cipher algorithms */
+#define QCE_ALG_DES BIT(0)
+#define QCE_ALG_3DES BIT(1)
+#define QCE_ALG_AES BIT(2)
+
+/* hash and hmac algorithms */
+#define QCE_HASH_SHA1 BIT(3)
+#define QCE_HASH_SHA256 BIT(4)
+#define QCE_HASH_SHA1_HMAC BIT(5)
+#define QCE_HASH_SHA256_HMAC BIT(6)
+#define QCE_HASH_AES_CMAC BIT(7)
+
+/* cipher modes */
+#define QCE_MODE_CBC BIT(8)
+#define QCE_MODE_ECB BIT(9)
+#define QCE_MODE_CTR BIT(10)
+#define QCE_MODE_XTS BIT(11)
+#define QCE_MODE_CCM BIT(12)
+#define QCE_MODE_MASK GENMASK(12, 8)
+
+/* cipher encryption/decryption operations */
+#define QCE_ENCRYPT BIT(13)
+#define QCE_DECRYPT BIT(14)
+
+#define IS_DES(flags) (flags & QCE_ALG_DES)
+#define IS_3DES(flags) (flags & QCE_ALG_3DES)
+#define IS_AES(flags) (flags & QCE_ALG_AES)
+
+#define IS_SHA1(flags) (flags & QCE_HASH_SHA1)
+#define IS_SHA256(flags) (flags & QCE_HASH_SHA256)
+#define IS_SHA1_HMAC(flags) (flags & QCE_HASH_SHA1_HMAC)
+#define IS_SHA256_HMAC(flags) (flags & QCE_HASH_SHA256_HMAC)
+#define IS_CMAC(flags) (flags & QCE_HASH_AES_CMAC)
+#define IS_SHA(flags) (IS_SHA1(flags) || IS_SHA256(flags))
+#define IS_SHA_HMAC(flags) \
+ (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags))
+
+#define IS_CBC(mode) (mode & QCE_MODE_CBC)
+#define IS_ECB(mode) (mode & QCE_MODE_ECB)
+#define IS_CTR(mode) (mode & QCE_MODE_CTR)
+#define IS_XTS(mode) (mode & QCE_MODE_XTS)
+#define IS_CCM(mode) (mode & QCE_MODE_CCM)
+
+#define IS_ENCRYPT(dir) (dir & QCE_ENCRYPT)
+#define IS_DECRYPT(dir) (dir & QCE_DECRYPT)
+
+struct qce_alg_template {
+ struct list_head entry;
+ u32 crypto_alg_type;
+ unsigned long alg_flags;
+ const u32 *std_iv;
+ union {
+ struct crypto_alg crypto;
+ struct ahash_alg ahash;
+ } alg;
+ struct qce_device *qce;
+};
+
+void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len);
+int qce_check_status(struct qce_device *qce, u32 *status);
+void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step);
+int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
+ u32 offset);
+
+#endif /* _COMMON_H_ */
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
new file mode 100644
index 000000000000..33ae3545dc48
--- /dev/null
+++ b/drivers/crypto/qce/core.c
@@ -0,0 +1,286 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+
+#include "core.h"
+#include "cipher.h"
+#include "sha.h"
+
+#define QCE_MAJOR_VERSION5 0x05
+#define QCE_QUEUE_LENGTH 1
+
+static const struct qce_algo_ops *qce_ops[] = {
+ &ablkcipher_ops,
+ &ahash_ops,
+};
+
+static void qce_unregister_algs(struct qce_device *qce)
+{
+ const struct qce_algo_ops *ops;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
+ ops = qce_ops[i];
+ ops->unregister_algs(qce);
+ }
+}
+
+static int qce_register_algs(struct qce_device *qce)
+{
+ const struct qce_algo_ops *ops;
+ int i, ret = -ENODEV;
+
+ for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
+ ops = qce_ops[i];
+ ret = ops->register_algs(qce);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int qce_handle_request(struct crypto_async_request *async_req)
+{
+ int ret = -EINVAL, i;
+ const struct qce_algo_ops *ops;
+ u32 type = crypto_tfm_alg_type(async_req->tfm);
+
+ for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
+ ops = qce_ops[i];
+ if (type != ops->type)
+ continue;
+ ret = ops->async_req_handle(async_req);
+ break;
+ }
+
+ return ret;
+}
+
+static int qce_handle_queue(struct qce_device *qce,
+ struct crypto_async_request *req)
+{
+ struct crypto_async_request *async_req, *backlog;
+ unsigned long flags;
+ int ret = 0, err;
+
+ spin_lock_irqsave(&qce->lock, flags);
+
+ if (req)
+ ret = crypto_enqueue_request(&qce->queue, req);
+
+ /* busy, do not dequeue request */
+ if (qce->req) {
+ spin_unlock_irqrestore(&qce->lock, flags);
+ return ret;
+ }
+
+ backlog = crypto_get_backlog(&qce->queue);
+ async_req = crypto_dequeue_request(&qce->queue);
+ if (async_req)
+ qce->req = async_req;
+
+ spin_unlock_irqrestore(&qce->lock, flags);
+
+ if (!async_req)
+ return ret;
+
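+	/* a backlogged request can now make progress, notify its owner */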
+ if (backlog) {
+ spin_lock_bh(&qce->lock);
+ backlog->complete(backlog, -EINPROGRESS);
+ spin_unlock_bh(&qce->lock);
+ }
+
+ err = qce_handle_request(async_req);
+ if (err) {
+ qce->result = err;
+ tasklet_schedule(&qce->done_tasklet);
+ }
+
+ return ret;
+}
+
+static void qce_tasklet_req_done(unsigned long data)
+{
+ struct qce_device *qce = (struct qce_device *)data;
+ struct crypto_async_request *req;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qce->lock, flags);
+ req = qce->req;
+ qce->req = NULL;
+ spin_unlock_irqrestore(&qce->lock, flags);
+
+ if (req)
+ req->complete(req, qce->result);
+
+ qce_handle_queue(qce, NULL);
+}
+
+static int qce_async_request_enqueue(struct qce_device *qce,
+ struct crypto_async_request *req)
+{
+ return qce_handle_queue(qce, req);
+}
+
+static void qce_async_request_done(struct qce_device *qce, int ret)
+{
+ qce->result = ret;
+ tasklet_schedule(&qce->done_tasklet);
+}
+
+static int qce_check_version(struct qce_device *qce)
+{
+ u32 major, minor, step;
+
+ qce_get_version(qce, &major, &minor, &step);
+
+	/*
+	 * The driver does not support engine version 5.0 because that
+	 * revision has special alignment requirements.
+	 */
+ if (major != QCE_MAJOR_VERSION5 || minor == 0)
+ return -ENODEV;
+
+ qce->burst_size = QCE_BAM_BURST_SIZE;
+ qce->pipe_pair_id = 1;
+
+ dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n",
+ major, minor, step);
+
+ return 0;
+}
+
+static int qce_crypto_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct qce_device *qce;
+ struct resource *res;
+ int ret;
+
+ qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
+ if (!qce)
+ return -ENOMEM;
+
+ qce->dev = dev;
+ platform_set_drvdata(pdev, qce);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ qce->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(qce->base))
+ return PTR_ERR(qce->base);
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret < 0)
+ return ret;
+
+ qce->core = devm_clk_get(qce->dev, "core");
+ if (IS_ERR(qce->core))
+ return PTR_ERR(qce->core);
+
+ qce->iface = devm_clk_get(qce->dev, "iface");
+ if (IS_ERR(qce->iface))
+ return PTR_ERR(qce->iface);
+
+ qce->bus = devm_clk_get(qce->dev, "bus");
+ if (IS_ERR(qce->bus))
+ return PTR_ERR(qce->bus);
+
+ ret = clk_prepare_enable(qce->core);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(qce->iface);
+ if (ret)
+ goto err_clks_core;
+
+ ret = clk_prepare_enable(qce->bus);
+ if (ret)
+ goto err_clks_iface;
+
+ ret = qce_dma_request(qce->dev, &qce->dma);
+ if (ret)
+ goto err_clks;
+
+ ret = qce_check_version(qce);
+ if (ret)
+ goto err_clks;
+
+ spin_lock_init(&qce->lock);
+ tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
+ (unsigned long)qce);
+ crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);
+
+ qce->async_req_enqueue = qce_async_request_enqueue;
+ qce->async_req_done = qce_async_request_done;
+
+ ret = qce_register_algs(qce);
+ if (ret)
+ goto err_dma;
+
+ return 0;
+
+err_dma:
+ qce_dma_release(&qce->dma);
+err_clks:
+ clk_disable_unprepare(qce->bus);
+err_clks_iface:
+ clk_disable_unprepare(qce->iface);
+err_clks_core:
+ clk_disable_unprepare(qce->core);
+ return ret;
+}
+
+static int qce_crypto_remove(struct platform_device *pdev)
+{
+ struct qce_device *qce = platform_get_drvdata(pdev);
+
+ tasklet_kill(&qce->done_tasklet);
+ qce_unregister_algs(qce);
+ qce_dma_release(&qce->dma);
+ clk_disable_unprepare(qce->bus);
+ clk_disable_unprepare(qce->iface);
+ clk_disable_unprepare(qce->core);
+ return 0;
+}
+
+static const struct of_device_id qce_crypto_of_match[] = {
+ { .compatible = "qcom,crypto-v5.1", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qce_crypto_of_match);
+
+static struct platform_driver qce_crypto_driver = {
+ .probe = qce_crypto_probe,
+ .remove = qce_crypto_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = KBUILD_MODNAME,
+ .of_match_table = qce_crypto_of_match,
+ },
+};
+module_platform_driver(qce_crypto_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm crypto engine driver");
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
+MODULE_AUTHOR("The Linux Foundation");
diff --git a/drivers/crypto/qce/core.h b/drivers/crypto/qce/core.h
new file mode 100644
index 000000000000..549965d4d91f
--- /dev/null
+++ b/drivers/crypto/qce/core.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CORE_H_
+#define _CORE_H_
+
+#include "dma.h"
+
+/**
+ * struct qce_device - crypto engine device structure
+ * @queue: crypto request queue
+ * @lock: spinlock protecting @queue and @req
+ * @done_tasklet: done tasklet object
+ * @req: current active request
+ * @result: result of current transform
+ * @base: virtual IO base
+ * @dev: pointer to device structure
+ * @core: core device clock
+ * @iface: interface clock
+ * @bus: bus clock
+ * @dma: pointer to dma data
+ * @burst_size: the crypto burst size
+ * @pipe_pair_id: which pipe pair id the device is using
+ * @async_req_enqueue: invoked by every algorithm to enqueue a request
+ * @async_req_done: invoked by every algorithm to finish its request
+ */
+struct qce_device {
+ struct crypto_queue queue;
+ spinlock_t lock;
+ struct tasklet_struct done_tasklet;
+ struct crypto_async_request *req;
+ int result;
+ void __iomem *base;
+ struct device *dev;
+ struct clk *core, *iface, *bus;
+ struct qce_dma_data dma;
+ int burst_size;
+ unsigned int pipe_pair_id;
+ int (*async_req_enqueue)(struct qce_device *qce,
+ struct crypto_async_request *req);
+ void (*async_req_done)(struct qce_device *qce, int ret);
+};
+
+/**
+ * struct qce_algo_ops - algorithm operations per crypto type
+ * @type: should be CRYPTO_ALG_TYPE_XXX
+ * @register_algs: invoked by core to register the algorithms
+ * @unregister_algs: invoked by core to unregister the algorithms
+ * @async_req_handle: invoked by core to handle enqueued request
+ */
+struct qce_algo_ops {
+ u32 type;
+ int (*register_algs)(struct qce_device *qce);
+ void (*unregister_algs)(struct qce_device *qce);
+ int (*async_req_handle)(struct crypto_async_request *async_req);
+};
+
+#endif /* _CORE_H_ */
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
new file mode 100644
index 000000000000..0fb21e13f247
--- /dev/null
+++ b/drivers/crypto/qce/dma.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dmaengine.h>
+#include <crypto/scatterwalk.h>
+
+#include "dma.h"
+
+int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
+{
+ int ret;
+
+ dma->txchan = dma_request_slave_channel_reason(dev, "tx");
+ if (IS_ERR(dma->txchan))
+ return PTR_ERR(dma->txchan);
+
+ dma->rxchan = dma_request_slave_channel_reason(dev, "rx");
+ if (IS_ERR(dma->rxchan)) {
+ ret = PTR_ERR(dma->rxchan);
+ goto error_rx;
+ }
+
+ dma->result_buf = kmalloc(QCE_RESULT_BUF_SZ + QCE_IGNORE_BUF_SZ,
+ GFP_KERNEL);
+ if (!dma->result_buf) {
+ ret = -ENOMEM;
+ goto error_nomem;
+ }
+
+ dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ;
+
+ return 0;
+error_nomem:
+ dma_release_channel(dma->rxchan);
+error_rx:
+ dma_release_channel(dma->txchan);
+ return ret;
+}
+
+void qce_dma_release(struct qce_dma_data *dma)
+{
+ dma_release_channel(dma->txchan);
+ dma_release_channel(dma->rxchan);
+ kfree(dma->result_buf);
+}
+
+int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, bool chained)
+{
+ int err;
+
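+	/* chained scatterlists have to be mapped one entry at a time */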
+ if (chained) {
+ while (sg) {
+ err = dma_map_sg(dev, sg, 1, dir);
+ if (!err)
+ return -EFAULT;
+ sg = scatterwalk_sg_next(sg);
+ }
+ } else {
+ err = dma_map_sg(dev, sg, nents, dir);
+ if (!err)
+ return -EFAULT;
+ }
+
+ return nents;
+}
+
+void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, bool chained)
+{
+ if (chained)
+ while (sg) {
+ dma_unmap_sg(dev, sg, 1, dir);
+ sg = scatterwalk_sg_next(sg);
+ }
+ else
+ dma_unmap_sg(dev, sg, nents, dir);
+}
+
+int qce_countsg(struct scatterlist *sglist, int nbytes, bool *chained)
+{
+ struct scatterlist *sg = sglist;
+ int nents = 0;
+
+ if (chained)
+ *chained = false;
+
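+	/* walk the list until nbytes are covered, detecting chained entries */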
+ while (nbytes > 0 && sg) {
+ nents++;
+ nbytes -= sg->length;
+ if (!sg_is_last(sg) && (sg + 1)->length == 0 && chained)
+ *chained = true;
+ sg = scatterwalk_sg_next(sg);
+ }
+
+ return nents;
+}
+
+struct scatterlist *
+qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
+{
+ struct scatterlist *sg = sgt->sgl, *sg_last = NULL;
+
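+	/* advance to the first unused (pageless) entry in the table */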
+ while (sg) {
+ if (!sg_page(sg))
+ break;
+ sg = sg_next(sg);
+ }
+
+ if (!sg)
+ return ERR_PTR(-EINVAL);
+
+ while (new_sgl && sg) {
+ sg_set_page(sg, sg_page(new_sgl), new_sgl->length,
+ new_sgl->offset);
+ sg_last = sg;
+ sg = sg_next(sg);
+ new_sgl = sg_next(new_sgl);
+ }
+
+ return sg_last;
+}
+
+static int qce_dma_prep_sg(struct dma_chan *chan, struct scatterlist *sg,
+ int nents, unsigned long flags,
+ enum dma_transfer_direction dir,
+ dma_async_tx_callback cb, void *cb_param)
+{
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t cookie;
+
+ if (!sg || !nents)
+ return -EINVAL;
+
+ desc = dmaengine_prep_slave_sg(chan, sg, nents, dir, flags);
+ if (!desc)
+ return -EINVAL;
+
+ desc->callback = cb;
+ desc->callback_param = cb_param;
+ cookie = dmaengine_submit(desc);
+
+ return dma_submit_error(cookie);
+}
+
+int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *rx_sg,
+ int rx_nents, struct scatterlist *tx_sg, int tx_nents,
+ dma_async_tx_callback cb, void *cb_param)
+{
+ struct dma_chan *rxchan = dma->rxchan;
+ struct dma_chan *txchan = dma->txchan;
+ unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+ int ret;
+
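+	/*
+	 * The rx channel feeds the source data into the engine
+	 * (DMA_MEM_TO_DEV); the tx channel brings the results back
+	 * (DMA_DEV_TO_MEM), so the completion callback is set on it.
+	 */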
+ ret = qce_dma_prep_sg(rxchan, rx_sg, rx_nents, flags, DMA_MEM_TO_DEV,
+ NULL, NULL);
+ if (ret)
+ return ret;
+
+ return qce_dma_prep_sg(txchan, tx_sg, tx_nents, flags, DMA_DEV_TO_MEM,
+ cb, cb_param);
+}
+
+void qce_dma_issue_pending(struct qce_dma_data *dma)
+{
+ dma_async_issue_pending(dma->rxchan);
+ dma_async_issue_pending(dma->txchan);
+}
+
+int qce_dma_terminate_all(struct qce_dma_data *dma)
+{
+ int ret;
+
+ ret = dmaengine_terminate_all(dma->rxchan);
+ return ret ?: dmaengine_terminate_all(dma->txchan);
+}
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
new file mode 100644
index 000000000000..805e378d59e9
--- /dev/null
+++ b/drivers/crypto/qce/dma.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DMA_H_
+#define _DMA_H_
+
+/* maximum data transfer block size between BAM and CE */
+#define QCE_BAM_BURST_SIZE 64
+
+#define QCE_AUTHIV_REGS_CNT 16
+#define QCE_AUTH_BYTECOUNT_REGS_CNT 4
+#define QCE_CNTRIV_REGS_CNT 4
+
+struct qce_result_dump {
+ u32 auth_iv[QCE_AUTHIV_REGS_CNT];
+ u32 auth_byte_count[QCE_AUTH_BYTECOUNT_REGS_CNT];
+ u32 encr_cntr_iv[QCE_CNTRIV_REGS_CNT];
+ u32 status;
+ u32 status2;
+};
+
+#define QCE_IGNORE_BUF_SZ (2 * QCE_BAM_BURST_SIZE)
+#define QCE_RESULT_BUF_SZ \
+ ALIGN(sizeof(struct qce_result_dump), QCE_BAM_BURST_SIZE)
+
+struct qce_dma_data {
+ struct dma_chan *txchan;
+ struct dma_chan *rxchan;
+ struct qce_result_dump *result_buf;
+ void *ignore_buf;
+};
+
+int qce_dma_request(struct device *dev, struct qce_dma_data *dma);
+void qce_dma_release(struct qce_dma_data *dma);
+int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
+ int in_ents, struct scatterlist *sg_out, int out_ents,
+ dma_async_tx_callback cb, void *cb_param);
+void qce_dma_issue_pending(struct qce_dma_data *dma);
+int qce_dma_terminate_all(struct qce_dma_data *dma);
+int qce_countsg(struct scatterlist *sg_list, int nbytes, bool *chained);
+void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, bool chained);
+int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, bool chained);
+struct scatterlist *
+qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add);
+
+#endif /* _DMA_H_ */
diff --git a/drivers/crypto/qce/regs-v5.h b/drivers/crypto/qce/regs-v5.h
new file mode 100644
index 000000000000..f0e19e35664a
--- /dev/null
+++ b/drivers/crypto/qce/regs-v5.h
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _REGS_V5_H_
+#define _REGS_V5_H_
+
+#include <linux/bitops.h>
+
+#define REG_VERSION 0x000
+#define REG_STATUS 0x100
+#define REG_STATUS2 0x104
+#define REG_ENGINES_AVAIL 0x108
+#define REG_FIFO_SIZES 0x10c
+#define REG_SEG_SIZE 0x110
+#define REG_GOPROC 0x120
+#define REG_ENCR_SEG_CFG 0x200
+#define REG_ENCR_SEG_SIZE 0x204
+#define REG_ENCR_SEG_START 0x208
+#define REG_CNTR0_IV0 0x20c
+#define REG_CNTR1_IV1 0x210
+#define REG_CNTR2_IV2 0x214
+#define REG_CNTR3_IV3 0x218
+#define REG_CNTR_MASK 0x21C
+#define REG_ENCR_CCM_INT_CNTR0 0x220
+#define REG_ENCR_CCM_INT_CNTR1 0x224
+#define REG_ENCR_CCM_INT_CNTR2 0x228
+#define REG_ENCR_CCM_INT_CNTR3 0x22c
+#define REG_ENCR_XTS_DU_SIZE 0x230
+#define REG_CNTR_MASK2 0x234
+#define REG_CNTR_MASK1 0x238
+#define REG_CNTR_MASK0 0x23c
+#define REG_AUTH_SEG_CFG 0x300
+#define REG_AUTH_SEG_SIZE 0x304
+#define REG_AUTH_SEG_START 0x308
+#define REG_AUTH_IV0 0x310
+#define REG_AUTH_IV1 0x314
+#define REG_AUTH_IV2 0x318
+#define REG_AUTH_IV3 0x31c
+#define REG_AUTH_IV4 0x320
+#define REG_AUTH_IV5 0x324
+#define REG_AUTH_IV6 0x328
+#define REG_AUTH_IV7 0x32c
+#define REG_AUTH_IV8 0x330
+#define REG_AUTH_IV9 0x334
+#define REG_AUTH_IV10 0x338
+#define REG_AUTH_IV11 0x33c
+#define REG_AUTH_IV12 0x340
+#define REG_AUTH_IV13 0x344
+#define REG_AUTH_IV14 0x348
+#define REG_AUTH_IV15 0x34c
+#define REG_AUTH_INFO_NONCE0 0x350
+#define REG_AUTH_INFO_NONCE1 0x354
+#define REG_AUTH_INFO_NONCE2 0x358
+#define REG_AUTH_INFO_NONCE3 0x35c
+#define REG_AUTH_BYTECNT0 0x390
+#define REG_AUTH_BYTECNT1 0x394
+#define REG_AUTH_BYTECNT2 0x398
+#define REG_AUTH_BYTECNT3 0x39c
+#define REG_AUTH_EXP_MAC0 0x3a0
+#define REG_AUTH_EXP_MAC1 0x3a4
+#define REG_AUTH_EXP_MAC2 0x3a8
+#define REG_AUTH_EXP_MAC3 0x3ac
+#define REG_AUTH_EXP_MAC4 0x3b0
+#define REG_AUTH_EXP_MAC5 0x3b4
+#define REG_AUTH_EXP_MAC6 0x3b8
+#define REG_AUTH_EXP_MAC7 0x3bc
+#define REG_CONFIG 0x400
+#define REG_GOPROC_QC_KEY 0x1000
+#define REG_GOPROC_OEM_KEY 0x2000
+#define REG_ENCR_KEY0 0x3000
+#define REG_ENCR_KEY1 0x3004
+#define REG_ENCR_KEY2 0x3008
+#define REG_ENCR_KEY3 0x300c
+#define REG_ENCR_KEY4 0x3010
+#define REG_ENCR_KEY5 0x3014
+#define REG_ENCR_KEY6 0x3018
+#define REG_ENCR_KEY7 0x301c
+#define REG_ENCR_XTS_KEY0 0x3020
+#define REG_ENCR_XTS_KEY1 0x3024
+#define REG_ENCR_XTS_KEY2 0x3028
+#define REG_ENCR_XTS_KEY3 0x302c
+#define REG_ENCR_XTS_KEY4 0x3030
+#define REG_ENCR_XTS_KEY5 0x3034
+#define REG_ENCR_XTS_KEY6 0x3038
+#define REG_ENCR_XTS_KEY7 0x303c
+#define REG_AUTH_KEY0 0x3040
+#define REG_AUTH_KEY1 0x3044
+#define REG_AUTH_KEY2 0x3048
+#define REG_AUTH_KEY3 0x304c
+#define REG_AUTH_KEY4 0x3050
+#define REG_AUTH_KEY5 0x3054
+#define REG_AUTH_KEY6 0x3058
+#define REG_AUTH_KEY7 0x305c
+#define REG_AUTH_KEY8 0x3060
+#define REG_AUTH_KEY9 0x3064
+#define REG_AUTH_KEY10 0x3068
+#define REG_AUTH_KEY11 0x306c
+#define REG_AUTH_KEY12 0x3070
+#define REG_AUTH_KEY13 0x3074
+#define REG_AUTH_KEY14 0x3078
+#define REG_AUTH_KEY15 0x307c
+
+/* Register bits - REG_VERSION */
+#define CORE_STEP_REV_SHIFT 0
+#define CORE_STEP_REV_MASK GENMASK(15, 0)
+#define CORE_MINOR_REV_SHIFT 16
+#define CORE_MINOR_REV_MASK GENMASK(23, 16)
+#define CORE_MAJOR_REV_SHIFT 24
+#define CORE_MAJOR_REV_MASK GENMASK(31, 24)
+
+/* Register bits - REG_STATUS */
+#define MAC_FAILED_SHIFT 31
+#define DOUT_SIZE_AVAIL_SHIFT 26
+#define DOUT_SIZE_AVAIL_MASK GENMASK(30, 26)
+#define DIN_SIZE_AVAIL_SHIFT 21
+#define DIN_SIZE_AVAIL_MASK GENMASK(25, 21)
+#define HSD_ERR_SHIFT 20
+#define ACCESS_VIOL_SHIFT 19
+#define PIPE_ACTIVE_ERR_SHIFT 18
+#define CFG_CHNG_ERR_SHIFT 17
+#define DOUT_ERR_SHIFT 16
+#define DIN_ERR_SHIFT 15
+#define AXI_ERR_SHIFT 14
+#define CRYPTO_STATE_SHIFT 10
+#define CRYPTO_STATE_MASK GENMASK(13, 10)
+#define ENCR_BUSY_SHIFT 9
+#define AUTH_BUSY_SHIFT 8
+#define DOUT_INTR_SHIFT 7
+#define DIN_INTR_SHIFT 6
+#define OP_DONE_INTR_SHIFT 5
+#define ERR_INTR_SHIFT 4
+#define DOUT_RDY_SHIFT 3
+#define DIN_RDY_SHIFT 2
+#define OPERATION_DONE_SHIFT 1
+#define SW_ERR_SHIFT 0
+
+/* Register bits - REG_STATUS2 */
+#define AXI_EXTRA_SHIFT 1
+#define LOCKED_SHIFT 2
+
+/* Register bits - REG_CONFIG */
+#define REQ_SIZE_SHIFT 17
+#define REQ_SIZE_MASK GENMASK(20, 17)
+#define REQ_SIZE_ENUM_1_BEAT 0
+#define REQ_SIZE_ENUM_2_BEAT 1
+#define REQ_SIZE_ENUM_3_BEAT 2
+#define REQ_SIZE_ENUM_4_BEAT 3
+#define REQ_SIZE_ENUM_5_BEAT 4
+#define REQ_SIZE_ENUM_6_BEAT 5
+#define REQ_SIZE_ENUM_7_BEAT 6
+#define REQ_SIZE_ENUM_8_BEAT 7
+#define REQ_SIZE_ENUM_9_BEAT 8
+#define REQ_SIZE_ENUM_10_BEAT 9
+#define REQ_SIZE_ENUM_11_BEAT 10
+#define REQ_SIZE_ENUM_12_BEAT 11
+#define REQ_SIZE_ENUM_13_BEAT 12
+#define REQ_SIZE_ENUM_14_BEAT 13
+#define REQ_SIZE_ENUM_15_BEAT 14
+#define REQ_SIZE_ENUM_16_BEAT 15
+
+#define MAX_QUEUED_REQ_SHIFT 14
+#define MAX_QUEUED_REQ_MASK GENMASK(24, 16)
+#define ENUM_1_QUEUED_REQS 0
+#define ENUM_2_QUEUED_REQS 1
+#define ENUM_3_QUEUED_REQS 2
+
+#define IRQ_ENABLES_SHIFT 10
+#define IRQ_ENABLES_MASK GENMASK(13, 10)
+
+#define LITTLE_ENDIAN_MODE_SHIFT 9
+#define PIPE_SET_SELECT_SHIFT 5
+#define PIPE_SET_SELECT_MASK GENMASK(8, 5)
+
+#define HIGH_SPD_EN_N_SHIFT 4
+#define MASK_DOUT_INTR_SHIFT 3
+#define MASK_DIN_INTR_SHIFT 2
+#define MASK_OP_DONE_INTR_SHIFT 1
+#define MASK_ERR_INTR_SHIFT 0
+
+/* Register bits - REG_AUTH_SEG_CFG */
+#define COMP_EXP_MAC_SHIFT 24
+#define COMP_EXP_MAC_DISABLED 0
+#define COMP_EXP_MAC_ENABLED 1
+
+#define F9_DIRECTION_SHIFT 23
+#define F9_DIRECTION_UPLINK 0
+#define F9_DIRECTION_DOWNLINK 1
+
+#define AUTH_NONCE_NUM_WORDS_SHIFT 20
+#define AUTH_NONCE_NUM_WORDS_MASK GENMASK(22, 20)
+
+#define USE_PIPE_KEY_AUTH_SHIFT 19
+#define USE_HW_KEY_AUTH_SHIFT 18
+#define AUTH_FIRST_SHIFT 17
+#define AUTH_LAST_SHIFT 16
+
+#define AUTH_POS_SHIFT 14
+#define AUTH_POS_MASK GENMASK(15, 14)
+#define AUTH_POS_BEFORE 0
+#define AUTH_POS_AFTER 1
+
+#define AUTH_SIZE_SHIFT 9
+#define AUTH_SIZE_MASK GENMASK(13, 9)
+#define AUTH_SIZE_SHA1 0
+#define AUTH_SIZE_SHA256 1
+#define AUTH_SIZE_ENUM_1_BYTES 0
+#define AUTH_SIZE_ENUM_2_BYTES 1
+#define AUTH_SIZE_ENUM_3_BYTES 2
+#define AUTH_SIZE_ENUM_4_BYTES 3
+#define AUTH_SIZE_ENUM_5_BYTES 4
+#define AUTH_SIZE_ENUM_6_BYTES 5
+#define AUTH_SIZE_ENUM_7_BYTES 6
+#define AUTH_SIZE_ENUM_8_BYTES 7
+#define AUTH_SIZE_ENUM_9_BYTES 8
+#define AUTH_SIZE_ENUM_10_BYTES 9
+#define AUTH_SIZE_ENUM_11_BYTES 10
+#define AUTH_SIZE_ENUM_12_BYTES 11
+#define AUTH_SIZE_ENUM_13_BYTES 12
+#define AUTH_SIZE_ENUM_14_BYTES 13
+#define AUTH_SIZE_ENUM_15_BYTES 14
+#define AUTH_SIZE_ENUM_16_BYTES 15
+
+#define AUTH_MODE_SHIFT 6
+#define AUTH_MODE_MASK GENMASK(8, 6)
+#define AUTH_MODE_HASH 0
+#define AUTH_MODE_HMAC 1
+#define AUTH_MODE_CCM 0
+#define AUTH_MODE_CMAC 1
+
+#define AUTH_KEY_SIZE_SHIFT 3
+#define AUTH_KEY_SIZE_MASK GENMASK(5, 3)
+#define AUTH_KEY_SZ_AES128 0
+#define AUTH_KEY_SZ_AES256 2
+
+#define AUTH_ALG_SHIFT 0
+#define AUTH_ALG_MASK GENMASK(2, 0)
+#define AUTH_ALG_NONE 0
+#define AUTH_ALG_SHA 1
+#define AUTH_ALG_AES 2
+#define AUTH_ALG_KASUMI 3
+#define AUTH_ALG_SNOW3G 4
+#define AUTH_ALG_ZUC 5
+
+/* Register bits - REG_ENCR_XTS_DU_SIZE */
+#define ENCR_XTS_DU_SIZE_SHIFT 0
+#define ENCR_XTS_DU_SIZE_MASK GENMASK(19, 0)
+
+/* Register bits - REG_ENCR_SEG_CFG */
+#define F8_KEYSTREAM_ENABLE_SHIFT 17
+#define F8_KEYSTREAM_DISABLED 0
+#define F8_KEYSTREAM_ENABLED 1
+
+#define F8_DIRECTION_SHIFT 16
+#define F8_DIRECTION_UPLINK 0
+#define F8_DIRECTION_DOWNLINK 1
+
+#define USE_PIPE_KEY_ENCR_SHIFT 15
+#define USE_PIPE_KEY_ENCR_ENABLED 1
+#define USE_KEY_REGISTERS 0
+
+#define USE_HW_KEY_ENCR_SHIFT 14
+#define USE_KEY_REG 0
+#define USE_HW_KEY 1
+
+#define LAST_CCM_SHIFT 13
+#define LAST_CCM_XFR 1
+#define INTERM_CCM_XFR 0
+
+#define CNTR_ALG_SHIFT 11
+#define CNTR_ALG_MASK GENMASK(12, 11)
+#define CNTR_ALG_NIST 0
+
+#define ENCODE_SHIFT 10
+
+#define ENCR_MODE_SHIFT 6
+#define ENCR_MODE_MASK GENMASK(9, 6)
+#define ENCR_MODE_ECB 0
+#define ENCR_MODE_CBC 1
+#define ENCR_MODE_CTR 2
+#define ENCR_MODE_XTS 3
+#define ENCR_MODE_CCM 4
+
+#define ENCR_KEY_SZ_SHIFT 3
+#define ENCR_KEY_SZ_MASK GENMASK(5, 3)
+#define ENCR_KEY_SZ_DES 0
+#define ENCR_KEY_SZ_3DES 1
+#define ENCR_KEY_SZ_AES128 0
+#define ENCR_KEY_SZ_AES256 2
+
+#define ENCR_ALG_SHIFT 0
+#define ENCR_ALG_MASK GENMASK(2, 0)
+#define ENCR_ALG_NONE 0
+#define ENCR_ALG_DES 1
+#define ENCR_ALG_AES 2
+#define ENCR_ALG_KASUMI 4
+#define ENCR_ALG_SNOW_3G 5
+#define ENCR_ALG_ZUC 6
+
+/* Register bits - REG_GOPROC */
+#define GO_SHIFT 0
+#define CLR_CNTXT_SHIFT 1
+#define RESULTS_DUMP_SHIFT 2
+
+/* Register bits - REG_ENGINES_AVAIL */
+#define ENCR_AES_SEL_SHIFT 0
+#define DES_SEL_SHIFT 1
+#define ENCR_SNOW3G_SEL_SHIFT 2
+#define ENCR_KASUMI_SEL_SHIFT 3
+#define SHA_SEL_SHIFT 4
+#define SHA512_SEL_SHIFT 5
+#define AUTH_AES_SEL_SHIFT 6
+#define AUTH_SNOW3G_SEL_SHIFT 7
+#define AUTH_KASUMI_SEL_SHIFT 8
+#define BAM_PIPE_SETS_SHIFT 9
+#define BAM_PIPE_SETS_MASK GENMASK(12, 9)
+#define AXI_WR_BEATS_SHIFT 13
+#define AXI_WR_BEATS_MASK GENMASK(18, 13)
+#define AXI_RD_BEATS_SHIFT 19
+#define AXI_RD_BEATS_MASK GENMASK(24, 19)
+#define ENCR_ZUC_SEL_SHIFT 26
+#define AUTH_ZUC_SEL_SHIFT 27
+#define ZUC_ENABLE_SHIFT 28
+
+#endif /* _REGS_V5_H_ */
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
new file mode 100644
index 000000000000..f3385934eed2
--- /dev/null
+++ b/drivers/crypto/qce/sha.c
@@ -0,0 +1,588 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <crypto/internal/hash.h>
+
+#include "common.h"
+#include "core.h"
+#include "sha.h"
+
+/* crypto hw padding constant for first operation */
+#define SHA_PADDING 64
+#define SHA_PADDING_MASK (SHA_PADDING - 1)
+
+static LIST_HEAD(ahash_algs);
+
+static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+ SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
+};
+
+static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+ SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+ SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
+};
+
+static void qce_ahash_done(void *data)
+{
+ struct crypto_async_request *async_req = data;
+ struct ahash_request *req = ahash_request_cast(async_req);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
+ struct qce_device *qce = tmpl->qce;
+ struct qce_result_dump *result = qce->dma.result_buf;
+ unsigned int digestsize = crypto_ahash_digestsize(ahash);
+ int error;
+ u32 status;
+
+ error = qce_dma_terminate_all(&qce->dma);
+ if (error)
+ dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);
+
+ qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
+ rctx->src_chained);
+ qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
+
+ memcpy(rctx->digest, result->auth_iv, digestsize);
+ if (req->result)
+ memcpy(req->result, result->auth_iv, digestsize);
+
+ rctx->byte_count[0] = cpu_to_be32(result->auth_byte_count[0]);
+ rctx->byte_count[1] = cpu_to_be32(result->auth_byte_count[1]);
+
+ error = qce_check_status(qce, &status);
+ if (error < 0)
+ dev_dbg(qce->dev, "ahash operation error (%x)\n", status);
+
+ req->src = rctx->src_orig;
+ req->nbytes = rctx->nbytes_orig;
+ rctx->last_blk = false;
+ rctx->first_blk = false;
+
+ qce->async_req_done(tmpl->qce, error);
+}
+
+static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
+{
+ struct ahash_request *req = ahash_request_cast(async_req);
+ struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct qce_sha_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+ struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
+ struct qce_device *qce = tmpl->qce;
+ unsigned long flags = rctx->flags;
+ int ret;
+
+ if (IS_SHA_HMAC(flags)) {
+ rctx->authkey = ctx->authkey;
+ rctx->authklen = QCE_SHA_HMAC_KEY_SIZE;
+ } else if (IS_CMAC(flags)) {
+ rctx->authkey = ctx->authkey;
+ rctx->authklen = AES_KEYSIZE_128;
+ }
+
+ rctx->src_nents = qce_countsg(req->src, req->nbytes,
+ &rctx->src_chained);
+ ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
+ rctx->src_chained);
+ if (ret < 0)
+ return ret;
+
+ sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
+
+ ret = qce_mapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
+ if (ret < 0)
+ goto error_unmap_src;
+
+ ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents,
+ &rctx->result_sg, 1, qce_ahash_done, async_req);
+ if (ret)
+ goto error_unmap_dst;
+
+ qce_dma_issue_pending(&qce->dma);
+
+ ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0);
+ if (ret)
+ goto error_terminate;
+
+ return 0;
+
+error_terminate:
+ qce_dma_terminate_all(&qce->dma);
+error_unmap_dst:
+ qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
+error_unmap_src:
+ qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
+ rctx->src_chained);
+ return ret;
+}
+
+static int qce_ahash_init(struct ahash_request *req)
+{
+ struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
+ const u32 *std_iv = tmpl->std_iv;
+
+ memset(rctx, 0, sizeof(*rctx));
+ rctx->first_blk = true;
+ rctx->last_blk = false;
+ rctx->flags = tmpl->alg_flags;
+ memcpy(rctx->digest, std_iv, sizeof(rctx->digest));
+
+ return 0;
+}
+
+static int qce_ahash_export(struct ahash_request *req, void *out)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+ unsigned long flags = rctx->flags;
+ unsigned int digestsize = crypto_ahash_digestsize(ahash);
+ unsigned int blocksize =
+ crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
+
+ if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
+ struct sha1_state *out_state = out;
+
+ out_state->count = rctx->count;
+ qce_cpu_to_be32p_array((__be32 *)out_state->state,
+ rctx->digest, digestsize);
+ memcpy(out_state->buffer, rctx->buf, blocksize);
+ } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
+ struct sha256_state *out_state = out;
+
+ out_state->count = rctx->count;
+ qce_cpu_to_be32p_array((__be32 *)out_state->state,
+ rctx->digest, digestsize);
+ memcpy(out_state->buf, rctx->buf, blocksize);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qce_import_common(struct ahash_request *req, u64 in_count,
+ const u32 *state, const u8 *buffer, bool hmac)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+ unsigned int digestsize = crypto_ahash_digestsize(ahash);
+ unsigned int blocksize;
+ u64 count = in_count;
+
+ blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
+ rctx->count = in_count;
+ memcpy(rctx->buf, buffer, blocksize);
+
+ if (in_count <= blocksize) {
+ rctx->first_blk = 1;
+ } else {
+ rctx->first_blk = 0;
+		/*
+		 * For HMAC, the hardware adds padding when the first block
+		 * flag is set. Therefore the byte_count must be incremented
+		 * by 64 after the first block operation.
+		 */
+ if (hmac)
+ count += SHA_PADDING;
+ }
+
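+	/* store the running byte count as low and high 32-bit words */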
+ rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK);
+ rctx->byte_count[1] = (__force __be32)(count >> 32);
+ qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state,
+ digestsize);
+ rctx->buflen = (unsigned int)(in_count & (blocksize - 1));
+
+ return 0;
+}
+
+static int qce_ahash_import(struct ahash_request *req, const void *in)
+{
+ struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+ unsigned long flags = rctx->flags;
+ bool hmac = IS_SHA_HMAC(flags);
+ int ret = -EINVAL;
+
+ if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
+ const struct sha1_state *state = in;
+
+ ret = qce_import_common(req, state->count, state->state,
+ state->buffer, hmac);
+ } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
+ const struct sha256_state *state = in;
+
+ ret = qce_import_common(req, state->count, state->state,
+ state->buf, hmac);
+ }
+
+ return ret;
+}
+
+static int qce_ahash_update(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
+ struct qce_device *qce = tmpl->qce;
+ struct scatterlist *sg_last, *sg;
+ unsigned int total, len;
+ unsigned int hash_later;
+ unsigned int nbytes;
+ unsigned int blocksize;
+
+ blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+ rctx->count += req->nbytes;
+
+	/* include any bytes buffered by previous updates in the total */
+ total = req->nbytes + rctx->buflen;
+
+ if (total <= blocksize) {
+ scatterwalk_map_and_copy(rctx->buf + rctx->buflen, req->src,
+ 0, req->nbytes, 0);
+ rctx->buflen += req->nbytes;
+ return 0;
+ }
+
+ /* save the original req structure fields */
+ rctx->src_orig = req->src;
+ rctx->nbytes_orig = req->nbytes;
+
+	/*
+	 * If data is left over from a previous update, copy it into the
+	 * temporary buffer so it can be combined with the bytes of the
+	 * current request.
+	 */
+ if (rctx->buflen)
+ memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);
+
+ /* calculate how many bytes will be hashed later */
+ hash_later = total % blocksize;
+ if (hash_later) {
+ unsigned int src_offset = req->nbytes - hash_later;
+ scatterwalk_map_and_copy(rctx->buf, req->src, src_offset,
+ hash_later, 0);
+ }
+
+	/* nbytes is now a multiple of the block size */
+ nbytes = total - hash_later;
+
+ len = rctx->buflen;
+ sg = sg_last = req->src;
+
+ while (len < nbytes && sg) {
+ if (len + sg_dma_len(sg) > nbytes)
+ break;
+ len += sg_dma_len(sg);
+ sg_last = sg;
+ sg = scatterwalk_sg_next(sg);
+ }
+
+ if (!sg_last)
+ return -EINVAL;
+
+ sg_mark_end(sg_last);
+
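+	/* prepend the previously buffered bytes with a two-entry sg chain */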
+ if (rctx->buflen) {
+ sg_init_table(rctx->sg, 2);
+ sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen);
+ scatterwalk_sg_chain(rctx->sg, 2, req->src);
+ req->src = rctx->sg;
+ }
+
+ req->nbytes = nbytes;
+ rctx->buflen = hash_later;
+
+ return qce->async_req_enqueue(tmpl->qce, &req->base);
+}
+
+static int qce_ahash_final(struct ahash_request *req)
+{
+ struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
+ struct qce_device *qce = tmpl->qce;
+
+ if (!rctx->buflen)
+ return 0;
+
+ rctx->last_blk = true;
+
+ rctx->src_orig = req->src;
+ rctx->nbytes_orig = req->nbytes;
+
+ memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);
+ sg_init_one(rctx->sg, rctx->tmpbuf, rctx->buflen);
+
+ req->src = rctx->sg;
+ req->nbytes = rctx->buflen;
+
+ return qce->async_req_enqueue(tmpl->qce, &req->base);
+}
+
+static int qce_ahash_digest(struct ahash_request *req)
+{
+ struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
+ struct qce_device *qce = tmpl->qce;
+ int ret;
+
+ ret = qce_ahash_init(req);
+ if (ret)
+ return ret;
+
+ rctx->src_orig = req->src;
+ rctx->nbytes_orig = req->nbytes;
+ rctx->first_blk = true;
+ rctx->last_blk = true;
+
+ return qce->async_req_enqueue(tmpl->qce, &req->base);
+}
+
+struct qce_ahash_result {
+ struct completion completion;
+ int error;
+};
+
+static void qce_digest_complete(struct crypto_async_request *req, int error)
+{
+ struct qce_ahash_result *result = req->data;
+
+ if (error == -EINPROGRESS)
+ return;
+
+ result->error = error;
+ complete(&result->completion);
+}
+
+static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ unsigned int digestsize = crypto_ahash_digestsize(tfm);
+ struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base);
+ struct qce_ahash_result result;
+ struct ahash_request *req;
+ struct scatterlist sg;
+ unsigned int blocksize;
+ struct crypto_ahash *ahash_tfm;
+ u8 *buf;
+ int ret;
+ const char *alg_name;
+
+ blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+ memset(ctx->authkey, 0, sizeof(ctx->authkey));
+
+ if (keylen <= blocksize) {
+ memcpy(ctx->authkey, key, keylen);
+ return 0;
+ }
+
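+	/* a key longer than the block size is hashed down to the digest size */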
+ if (digestsize == SHA1_DIGEST_SIZE)
+ alg_name = "sha1-qce";
+ else if (digestsize == SHA256_DIGEST_SIZE)
+ alg_name = "sha256-qce";
+ else
+ return -EINVAL;
+
+ ahash_tfm = crypto_alloc_ahash(alg_name, CRYPTO_ALG_TYPE_AHASH,
+ CRYPTO_ALG_TYPE_AHASH_MASK);
+ if (IS_ERR(ahash_tfm))
+ return PTR_ERR(ahash_tfm);
+
+ req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto err_free_ahash;
+ }
+
+ init_completion(&result.completion);
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ qce_digest_complete, &result);
+ crypto_ahash_clear_flags(ahash_tfm, ~0);
+
+ buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_free_req;
+ }
+
+ memcpy(buf, key, keylen);
+ sg_init_one(&sg, buf, keylen);
+ ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);
+
+ ret = crypto_ahash_digest(req);
+ if (ret == -EINPROGRESS || ret == -EBUSY) {
+ ret = wait_for_completion_interruptible(&result.completion);
+ if (!ret)
+ ret = result.error;
+ }
+
+ if (ret)
+ crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+
+ kfree(buf);
+err_free_req:
+ ahash_request_free(req);
+err_free_ahash:
+ crypto_free_ahash(ahash_tfm);
+ return ret;
+}
+
+static int qce_ahash_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct qce_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_ahash_set_reqsize(ahash, sizeof(struct qce_sha_reqctx));
+ memset(ctx, 0, sizeof(*ctx));
+ return 0;
+}
+
+struct qce_ahash_def {
+ unsigned long flags;
+ const char *name;
+ const char *drv_name;
+ unsigned int digestsize;
+ unsigned int blocksize;
+ unsigned int statesize;
+ const u32 *std_iv;
+};
+
+static const struct qce_ahash_def ahash_def[] = {
+ {
+ .flags = QCE_HASH_SHA1,
+ .name = "sha1",
+ .drv_name = "sha1-qce",
+ .digestsize = SHA1_DIGEST_SIZE,
+ .blocksize = SHA1_BLOCK_SIZE,
+ .statesize = sizeof(struct sha1_state),
+ .std_iv = std_iv_sha1,
+ },
+ {
+ .flags = QCE_HASH_SHA256,
+ .name = "sha256",
+ .drv_name = "sha256-qce",
+ .digestsize = SHA256_DIGEST_SIZE,
+ .blocksize = SHA256_BLOCK_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .std_iv = std_iv_sha256,
+ },
+ {
+ .flags = QCE_HASH_SHA1_HMAC,
+ .name = "hmac(sha1)",
+ .drv_name = "hmac-sha1-qce",
+ .digestsize = SHA1_DIGEST_SIZE,
+ .blocksize = SHA1_BLOCK_SIZE,
+ .statesize = sizeof(struct sha1_state),
+ .std_iv = std_iv_sha1,
+ },
+ {
+ .flags = QCE_HASH_SHA256_HMAC,
+ .name = "hmac(sha256)",
+ .drv_name = "hmac-sha256-qce",
+ .digestsize = SHA256_DIGEST_SIZE,
+ .blocksize = SHA256_BLOCK_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .std_iv = std_iv_sha256,
+ },
+};
+
+static int qce_ahash_register_one(const struct qce_ahash_def *def,
+ struct qce_device *qce)
+{
+ struct qce_alg_template *tmpl;
+ struct ahash_alg *alg;
+ struct crypto_alg *base;
+ int ret;
+
+ tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
+ if (!tmpl)
+ return -ENOMEM;
+
+ tmpl->std_iv = def->std_iv;
+
+ alg = &tmpl->alg.ahash;
+ alg->init = qce_ahash_init;
+ alg->update = qce_ahash_update;
+ alg->final = qce_ahash_final;
+ alg->digest = qce_ahash_digest;
+ alg->export = qce_ahash_export;
+ alg->import = qce_ahash_import;
+ if (IS_SHA_HMAC(def->flags))
+ alg->setkey = qce_ahash_hmac_setkey;
+ alg->halg.digestsize = def->digestsize;
+ alg->halg.statesize = def->statesize;
+
+ base = &alg->halg.base;
+ base->cra_blocksize = def->blocksize;
+ base->cra_priority = 300;
+ base->cra_flags = CRYPTO_ALG_ASYNC;
+ base->cra_ctxsize = sizeof(struct qce_sha_ctx);
+ base->cra_alignmask = 0;
+ base->cra_module = THIS_MODULE;
+ base->cra_init = qce_ahash_cra_init;
+ INIT_LIST_HEAD(&base->cra_list);
+
+ snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ def->drv_name);
+
+ INIT_LIST_HEAD(&tmpl->entry);
+ tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AHASH;
+ tmpl->alg_flags = def->flags;
+ tmpl->qce = qce;
+
+ ret = crypto_register_ahash(alg);
+ if (ret) {
+ kfree(tmpl);
+ dev_err(qce->dev, "%s registration failed\n", base->cra_name);
+ return ret;
+ }
+
+ list_add_tail(&tmpl->entry, &ahash_algs);
+ dev_dbg(qce->dev, "%s is registered\n", base->cra_name);
+ return 0;
+}
+
+static void qce_ahash_unregister(struct qce_device *qce)
+{
+ struct qce_alg_template *tmpl, *n;
+
+ list_for_each_entry_safe(tmpl, n, &ahash_algs, entry) {
+ crypto_unregister_ahash(&tmpl->alg.ahash);
+ list_del(&tmpl->entry);
+ kfree(tmpl);
+ }
+}
+
+static int qce_ahash_register(struct qce_device *qce)
+{
+ int ret, i;
+
+ for (i = 0; i < ARRAY_SIZE(ahash_def); i++) {
+ ret = qce_ahash_register_one(&ahash_def[i], qce);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ qce_ahash_unregister(qce);
+ return ret;
+}
+
+const struct qce_algo_ops ahash_ops = {
+ .type = CRYPTO_ALG_TYPE_AHASH,
+ .register_algs = qce_ahash_register,
+ .unregister_algs = qce_ahash_unregister,
+ .async_req_handle = qce_ahash_async_req_handle,
+};
diff --git a/drivers/crypto/qce/sha.h b/drivers/crypto/qce/sha.h
new file mode 100644
index 000000000000..286f0d5397f3
--- /dev/null
+++ b/drivers/crypto/qce/sha.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SHA_H_
+#define _SHA_H_
+
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+
+#include "common.h"
+#include "core.h"
+
+#define QCE_SHA_MAX_BLOCKSIZE SHA256_BLOCK_SIZE
+#define QCE_SHA_MAX_DIGESTSIZE SHA256_DIGEST_SIZE
+
+struct qce_sha_ctx {
+ u8 authkey[QCE_SHA_MAX_BLOCKSIZE];
+};
+
+/**
+ * struct qce_sha_reqctx - holds private ahash objects per request
+ * @buf: used during update, import and export
+ * @tmpbuf: buffer for internal use
+ * @digest: calculated digest buffer
+ * @buflen: length of the buffer
+ * @flags: operation flags
+ * @src_orig: original request sg list
+ * @nbytes_orig: original request number of bytes
+ * @src_chained: is source scatterlist chained
+ * @src_nents: source number of entries
+ * @byte_count: saved hardware byte count, as low and high 32-bit words
+ * @count: total count of bytes processed, kept across update, import and export
+ * @first_blk: is it the first block
+ * @last_blk: is it the last block
+ * @sg: used to chain sg lists
+ * @authkey: pointer to auth key in sha ctx
+ * @authklen: auth key length
+ * @result_sg: scatterlist used for result buffer
+ */
+struct qce_sha_reqctx {
+ u8 buf[QCE_SHA_MAX_BLOCKSIZE];
+ u8 tmpbuf[QCE_SHA_MAX_BLOCKSIZE];
+ u8 digest[QCE_SHA_MAX_DIGESTSIZE];
+ unsigned int buflen;
+ unsigned long flags;
+ struct scatterlist *src_orig;
+ unsigned int nbytes_orig;
+ bool src_chained;
+ int src_nents;
+ __be32 byte_count[2];
+ u64 count;
+ bool first_blk;
+ bool last_blk;
+ struct scatterlist sg[2];
+ u8 *authkey;
+ unsigned int authklen;
+ struct scatterlist result_sg;
+};
+
+static inline struct qce_alg_template *to_ahash_tmpl(struct crypto_tfm *tfm)
+{
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct ahash_alg *alg = container_of(crypto_hash_alg_common(ahash),
+ struct ahash_alg, halg);
+
+ return container_of(alg, struct qce_alg_template, alg.ahash);
+}
+
+extern const struct qce_algo_ops ahash_ops;
+
+#endif /* _SHA_H_ */
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index a999f537228f..92105f3dc8e0 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -190,7 +190,7 @@ static void add_session_id(struct cryp_ctx *ctx)
static irqreturn_t cryp_interrupt_handler(int irq, void *param)
{
struct cryp_ctx *ctx;
- int i;
+ int count;
struct cryp_device_data *device_data;
if (param == NULL) {
@@ -215,12 +215,11 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param)
if (cryp_pending_irq_src(device_data,
CRYP_IRQ_SRC_OUTPUT_FIFO)) {
if (ctx->outlen / ctx->blocksize > 0) {
- for (i = 0; i < ctx->blocksize / 4; i++) {
- *(ctx->outdata) = readl_relaxed(
- &device_data->base->dout);
- ctx->outdata += 4;
- ctx->outlen -= 4;
- }
+ count = ctx->blocksize / 4;
+
+ readsl(&device_data->base->dout, ctx->outdata, count);
+ ctx->outdata += count;
+ ctx->outlen -= count;
if (ctx->outlen == 0) {
cryp_disable_irq_src(device_data,
@@ -230,12 +229,12 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param)
} else if (cryp_pending_irq_src(device_data,
CRYP_IRQ_SRC_INPUT_FIFO)) {
if (ctx->datalen / ctx->blocksize > 0) {
- for (i = 0 ; i < ctx->blocksize / 4; i++) {
- writel_relaxed(ctx->indata,
- &device_data->base->din);
- ctx->indata += 4;
- ctx->datalen -= 4;
- }
+ count = ctx->blocksize / 4;
+
+ writesl(&device_data->base->din, ctx->indata, count);
+
+ ctx->indata += count;
+ ctx->datalen -= count;
if (ctx->datalen == 0)
cryp_disable_irq_src(device_data,