From 1a92b2ba339221a4afee43adf125fcc9a41353f7 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 14 Apr 2015 11:51:29 -0500 Subject: crypto: mv_cesa - ensure backlog is initialised backlog is not initialised so in the case where cpg->eng_st != ENGINE_IDLE it is never initialised and hence which could lead to an illegal memory dereference in the statement: backlog->complete(backlog, -EINPROGRESS); Discovered with cppcheck static analsys: [drivers/crypto/mv_cesa.c:616]: (error) Uninitialized variable: backlog Signed-off-by: Colin Ian King Signed-off-by: Herbert Xu --- drivers/crypto/mv_cesa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c index f91f15ddee92..e63efbd840b5 100644 --- a/drivers/crypto/mv_cesa.c +++ b/drivers/crypto/mv_cesa.c @@ -595,7 +595,7 @@ static int queue_manag(void *data) cpg->eng_st = ENGINE_IDLE; do { struct crypto_async_request *async_req = NULL; - struct crypto_async_request *backlog; + struct crypto_async_request *backlog = NULL; __set_current_state(TASK_INTERRUPTIBLE); -- cgit v1.2.3 From 2529bc371c4154edcf61d76b9691e5da961e23c5 Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:31:49 +0200 Subject: crypto: talitos - Use zero entry to init descriptors ptrs to zero Do use zero_entry value to init the descriptors ptrs to zero instead of writing 0 in each field Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 857414afa29a..7bf1b2b9c268 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1373,9 +1373,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc, int sg_count, ret; /* first DWORD empty */ - desc->ptr[0].len = 0; - to_talitos_ptr(&desc->ptr[0], 0); - desc->ptr[0].j_extent = 0; + desc->ptr[0] = zero_entry; /* cipher iv */ to_talitos_ptr(&desc->ptr[1], edesc->iv_dma); @@ -1445,9 +1443,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc, DMA_FROM_DEVICE); /* last DWORD empty */ - desc->ptr[6].len = 0; - to_talitos_ptr(&desc->ptr[6], 0); - desc->ptr[6].j_extent = 0; + desc->ptr[6] = zero_entry; ret = talitos_submit(dev, ctx->ch, desc, callback, areq); if (ret != -EINPROGRESS) { -- cgit v1.2.3 From 032d197eaaea13d220d141cdbca69e519b7af145 Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:31:51 +0200 Subject: crypto: talitos - Refactor the sg in/out chain allocation This patch refactors the handling of the input and output data that is quite similar in several functions Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 159 ++++++++++++++++++++++++----------------------- 1 file changed, 81 insertions(+), 78 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 7bf1b2b9c268..5a7e345c523b 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1327,16 +1327,23 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, return 0; } +static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src, + struct scatterlist *dst, unsigned int len, + struct talitos_edesc *edesc) +{ + talitos_sg_unmap(dev, edesc, src, dst); +} + static void common_nonsnoop_unmap(struct device *dev, struct talitos_edesc *edesc, struct ablkcipher_request *areq) { unmap_single_talitos_ptr(dev, 
&edesc->desc.ptr[5], DMA_FROM_DEVICE); + + unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc); unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE); - talitos_sg_unmap(dev, edesc, areq->src, areq->dst); - if (edesc->dma_len) dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, DMA_BIDIRECTIONAL); @@ -1358,6 +1365,65 @@ static void ablkcipher_done(struct device *dev, areq->base.complete(&areq->base, err); } +int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src, + unsigned int len, struct talitos_edesc *edesc, + enum dma_data_direction dir, struct talitos_ptr *ptr) +{ + int sg_count; + + ptr->len = cpu_to_be16(len); + ptr->j_extent = 0; + + sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir, + edesc->src_chained); + + if (sg_count == 1) { + to_talitos_ptr(ptr, sg_dma_address(src)); + } else { + sg_count = sg_to_link_tbl(src, sg_count, len, + &edesc->link_tbl[0]); + if (sg_count > 1) { + to_talitos_ptr(ptr, edesc->dma_link_tbl); + ptr->j_extent |= DESC_PTR_LNKTBL_JUMP; + dma_sync_single_for_device(dev, edesc->dma_link_tbl, + edesc->dma_len, + DMA_BIDIRECTIONAL); + } else { + /* Only one segment now, so no link tbl needed */ + to_talitos_ptr(ptr, sg_dma_address(src)); + } + } + return sg_count; +} + +void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst, + unsigned int len, struct talitos_edesc *edesc, + enum dma_data_direction dir, + struct talitos_ptr *ptr, int sg_count) +{ + ptr->len = cpu_to_be16(len); + ptr->j_extent = 0; + + if (dir != DMA_NONE) + sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? : 1, + dir, edesc->dst_chained); + + if (sg_count == 1) { + to_talitos_ptr(ptr, sg_dma_address(dst)); + } else { + struct talitos_ptr *link_tbl_ptr = + &edesc->link_tbl[edesc->src_nents + 1]; + + to_talitos_ptr(ptr, edesc->dma_link_tbl + + (edesc->src_nents + 1) * + sizeof(struct talitos_ptr)); + ptr->j_extent |= DESC_PTR_LNKTBL_JUMP; + sg_count = sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr); + dma_sync_single_for_device(dev, edesc->dma_link_tbl, + edesc->dma_len, DMA_BIDIRECTIONAL); + } +} + static int common_nonsnoop(struct talitos_edesc *edesc, struct ablkcipher_request *areq, void (*callback) (struct device *dev, @@ -1387,56 +1453,16 @@ static int common_nonsnoop(struct talitos_edesc *edesc, /* * cipher in */ - desc->ptr[3].len = cpu_to_be16(cryptlen); - desc->ptr[3].j_extent = 0; - - sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1, - (areq->src == areq->dst) ? DMA_BIDIRECTIONAL - : DMA_TO_DEVICE, - edesc->src_chained); - - if (sg_count == 1) { - to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src)); - } else { - sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen, - &edesc->link_tbl[0]); - if (sg_count > 1) { - to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl); - desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP; - dma_sync_single_for_device(dev, edesc->dma_link_tbl, - edesc->dma_len, - DMA_BIDIRECTIONAL); - } else { - /* Only one segment now, so no link tbl needed */ - to_talitos_ptr(&desc->ptr[3], - sg_dma_address(areq->src)); - } - } + sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc, + (areq->src == areq->dst) ? + DMA_BIDIRECTIONAL : DMA_TO_DEVICE, + &desc->ptr[3]); /* cipher out */ - desc->ptr[4].len = cpu_to_be16(cryptlen); - desc->ptr[4].j_extent = 0; - - if (areq->src != areq->dst) - sg_count = talitos_map_sg(dev, areq->dst, - edesc->dst_nents ? 
: 1, - DMA_FROM_DEVICE, edesc->dst_chained); - - if (sg_count == 1) { - to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst)); - } else { - struct talitos_ptr *link_tbl_ptr = - &edesc->link_tbl[edesc->src_nents + 1]; - - to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl + - (edesc->src_nents + 1) * - sizeof(struct talitos_ptr)); - desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; - sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, - link_tbl_ptr); - dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, - edesc->dma_len, DMA_BIDIRECTIONAL); - } + map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc, + (areq->src == areq->dst) ? DMA_NONE + : DMA_FROM_DEVICE, + &desc->ptr[4], sg_count); /* iv out */ map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0, @@ -1506,6 +1532,8 @@ static void common_nonsnoop_hash_unmap(struct device *dev, unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); + unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc); + /* When using hashctx-in, must unmap it. */ if (edesc->desc.ptr[1].len) unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], @@ -1515,8 +1543,6 @@ static void common_nonsnoop_hash_unmap(struct device *dev, unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); - talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL); - if (edesc->dma_len) dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, DMA_BIDIRECTIONAL); @@ -1555,7 +1581,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); struct device *dev = ctx->dev; struct talitos_desc *desc = &edesc->desc; - int sg_count, ret; + int ret; /* first DWORD empty */ desc->ptr[0] = zero_entry; @@ -1583,31 +1609,8 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, /* * data in */ - desc->ptr[3].len = cpu_to_be16(length); - desc->ptr[3].j_extent = 0; - - sg_count = talitos_map_sg(dev, req_ctx->psrc, - edesc->src_nents ? : 1, - DMA_TO_DEVICE, edesc->src_chained); - - if (sg_count == 1) { - to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc)); - } else { - sg_count = sg_to_link_tbl(req_ctx->psrc, sg_count, length, - &edesc->link_tbl[0]); - if (sg_count > 1) { - desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP; - to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl); - dma_sync_single_for_device(ctx->dev, - edesc->dma_link_tbl, - edesc->dma_len, - DMA_BIDIRECTIONAL); - } else { - /* Only one segment now, so no link tbl needed */ - to_talitos_ptr(&desc->ptr[3], - sg_dma_address(req_ctx->psrc)); - } - } + map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc, + DMA_TO_DEVICE, &desc->ptr[3]); /* fifth DWORD empty */ desc->ptr[4] = zero_entry; -- cgit v1.2.3 From edc6bd698a4d335db0962aac8ab07f2840786a02 Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:31:53 +0200 Subject: crypto: talitos - talitos_ptr renamed ptr for more lisibility Linux CodyingStyle recommends to use short variables for local variables. ptr is just good enough for those 3 lines functions. It helps keep single lines shorter than 80 characters. 
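[Editor's note, not part of any patch in this series: a minimal usage sketch for the sg-chain helpers introduced by the refactoring commit 032d197e above. Every identifier below is taken from that patch. The point of the refactor is that the ablkcipher and ahash paths now share one input helper and one output helper; for an in-place request (areq->src == areq->dst) the scatterlist is mapped once as DMA_BIDIRECTIONAL and DMA_NONE tells the output helper to reuse the sg_count returned by the input helper instead of mapping the list a second time.]

	/* call pattern as used by common_nonsnoop() in the refactoring patch above */
	sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
					 (areq->src == areq->dst) ?
					 DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
					 &desc->ptr[3]);
	map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
			       (areq->src == areq->dst) ? DMA_NONE
							: DMA_FROM_DEVICE,
			       &desc->ptr[4], sg_count);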
Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 5a7e345c523b..fca0aedcbc12 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -55,37 +55,37 @@ #include "talitos.h" -static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr) +static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr) { - talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr)); - talitos_ptr->eptr = upper_32_bits(dma_addr); + ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr)); + ptr->eptr = upper_32_bits(dma_addr); } /* * map virtual single (contiguous) pointer to h/w descriptor pointer */ static void map_single_talitos_ptr(struct device *dev, - struct talitos_ptr *talitos_ptr, + struct talitos_ptr *ptr, unsigned short len, void *data, unsigned char extent, enum dma_data_direction dir) { dma_addr_t dma_addr = dma_map_single(dev, data, len, dir); - talitos_ptr->len = cpu_to_be16(len); - to_talitos_ptr(talitos_ptr, dma_addr); - talitos_ptr->j_extent = extent; + ptr->len = cpu_to_be16(len); + to_talitos_ptr(ptr, dma_addr); + ptr->j_extent = extent; } /* * unmap bus single (contiguous) h/w descriptor pointer */ static void unmap_single_talitos_ptr(struct device *dev, - struct talitos_ptr *talitos_ptr, + struct talitos_ptr *ptr, enum dma_data_direction dir) { - dma_unmap_single(dev, be32_to_cpu(talitos_ptr->ptr), - be16_to_cpu(talitos_ptr->len), dir); + dma_unmap_single(dev, be32_to_cpu(ptr->ptr), + be16_to_cpu(ptr->len), dir); } static int reset_channel(struct device *dev, int ch) -- cgit v1.2.3 From 185eb79f6a6536c2b61d2638e138ca439bd327c3 Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:31:55 +0200 Subject: crypto: talitos - Add a helper function to clear j_extent field j_extent field is specific to SEC2 so we add a helper function to clear it so that SEC1 can redefine that function as nop Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index fca0aedcbc12..c93f79b8bef8 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -61,6 +61,11 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr) ptr->eptr = upper_32_bits(dma_addr); } +static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr) +{ + ptr->j_extent = 0; +} + /* * map virtual single (contiguous) pointer to h/w descriptor pointer */ @@ -1372,7 +1377,7 @@ int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src, int sg_count; ptr->len = cpu_to_be16(len); - ptr->j_extent = 0; + to_talitos_ptr_extent_clear(ptr); sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir, edesc->src_chained); @@ -1402,7 +1407,7 @@ void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst, struct talitos_ptr *ptr, int sg_count) { ptr->len = cpu_to_be16(len); - ptr->j_extent = 0; + to_talitos_ptr_extent_clear(ptr); if (dir != DMA_NONE) sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? 
: 1, @@ -1444,7 +1449,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc, /* cipher iv */ to_talitos_ptr(&desc->ptr[1], edesc->iv_dma); desc->ptr[1].len = cpu_to_be16(ivsize); - desc->ptr[1].j_extent = 0; + to_talitos_ptr_extent_clear(&desc->ptr[1]); /* cipher key */ map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, -- cgit v1.2.3 From a2b35aa86eeaca6c1d416e3c43d44222c34727fb Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:31:57 +0200 Subject: crypto: talitos - remove param 'extent' in map_single_talitos_ptr() map_single_talitos_ptr() is always called with extent == 0, so lets remove this unused parameter Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index c93f79b8bef8..81e5636a2a4e 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -72,14 +72,13 @@ static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr) static void map_single_talitos_ptr(struct device *dev, struct talitos_ptr *ptr, unsigned short len, void *data, - unsigned char extent, enum dma_data_direction dir) { dma_addr_t dma_addr = dma_map_single(dev, data, len, dir); ptr->len = cpu_to_be16(len); to_talitos_ptr(ptr, dma_addr); - ptr->j_extent = extent; + to_talitos_ptr_extent_clear(ptr); } /* @@ -958,7 +957,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, /* hmac key */ map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, - 0, DMA_TO_DEVICE); + DMA_TO_DEVICE); /* hmac data */ desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize); @@ -1002,7 +1001,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, /* cipher key */ map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen, - (char *)&ctx->key + ctx->authkeylen, 0, + (char *)&ctx->key + ctx->authkeylen, DMA_TO_DEVICE); /* @@ -1080,7 +1079,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, } /* iv out */ - map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0, + map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, DMA_FROM_DEVICE); ret = talitos_submit(dev, ctx->ch, desc, callback, areq); @@ -1453,7 +1452,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc, /* cipher key */ map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, - (char *)&ctx->key, 0, DMA_TO_DEVICE); + (char *)&ctx->key, DMA_TO_DEVICE); /* * cipher in @@ -1470,7 +1469,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc, &desc->ptr[4], sg_count); /* iv out */ - map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0, + map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, DMA_FROM_DEVICE); /* last DWORD empty */ @@ -1595,7 +1594,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, if (!req_ctx->first || req_ctx->swinit) { map_single_talitos_ptr(dev, &desc->ptr[1], req_ctx->hw_context_size, - (char *)req_ctx->hw_context, 0, + (char *)req_ctx->hw_context, DMA_TO_DEVICE); req_ctx->swinit = 0; } else { @@ -1607,7 +1606,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, /* HMAC key */ if (ctx->keylen) map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, - (char *)&ctx->key, 0, DMA_TO_DEVICE); + (char *)&ctx->key, DMA_TO_DEVICE); else desc->ptr[2] = zero_entry; @@ -1624,11 +1623,11 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, if (req_ctx->last) 
map_single_talitos_ptr(dev, &desc->ptr[5], crypto_ahash_digestsize(tfm), - areq->result, 0, DMA_FROM_DEVICE); + areq->result, DMA_FROM_DEVICE); else map_single_talitos_ptr(dev, &desc->ptr[5], req_ctx->hw_context_size, - req_ctx->hw_context, 0, DMA_FROM_DEVICE); + req_ctx->hw_context, DMA_FROM_DEVICE); /* last DWORD empty */ desc->ptr[6] = zero_entry; -- cgit v1.2.3 From 538caf83374c0b24e2dfe2ca381354f207ca7cde Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:31:59 +0200 Subject: crypto: talitos - helper function for ptr len This patch adds a helper function for reads and writes of the len param of the talitos descriptor. This will help implement SEC1 later. Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 81e5636a2a4e..bca6dedb357d 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -61,6 +61,16 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr) ptr->eptr = upper_32_bits(dma_addr); } +static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned short len) +{ + ptr->len = cpu_to_be16(len); +} + +static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr) +{ + return be16_to_cpu(ptr->len); +} + static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr) { ptr->j_extent = 0; @@ -76,7 +86,7 @@ static void map_single_talitos_ptr(struct device *dev, { dma_addr_t dma_addr = dma_map_single(dev, data, len, dir); - ptr->len = cpu_to_be16(len); + to_talitos_ptr_len(ptr, len); to_talitos_ptr(ptr, dma_addr); to_talitos_ptr_extent_clear(ptr); } @@ -89,7 +99,7 @@ static void unmap_single_talitos_ptr(struct device *dev, enum dma_data_direction dir) { dma_unmap_single(dev, be32_to_cpu(ptr->ptr), - be16_to_cpu(ptr->len), dir); + from_talitos_ptr_len(ptr), dir); } static int reset_channel(struct device *dev, int ch) @@ -1375,7 +1385,7 @@ int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src, { int sg_count; - ptr->len = cpu_to_be16(len); + to_talitos_ptr_len(ptr, len); to_talitos_ptr_extent_clear(ptr); sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir, @@ -1405,7 +1415,7 @@ void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst, enum dma_data_direction dir, struct talitos_ptr *ptr, int sg_count) { - ptr->len = cpu_to_be16(len); + to_talitos_ptr_len(ptr, len); to_talitos_ptr_extent_clear(ptr); if (dir != DMA_NONE) @@ -1447,7 +1457,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc, /* cipher iv */ to_talitos_ptr(&desc->ptr[1], edesc->iv_dma); - desc->ptr[1].len = cpu_to_be16(ivsize); + to_talitos_ptr_len(&desc->ptr[1], ivsize); to_talitos_ptr_extent_clear(&desc->ptr[1]); /* cipher key */ @@ -1539,11 +1549,11 @@ static void common_nonsnoop_hash_unmap(struct device *dev, unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc); /* When using hashctx-in, must unmap it. 
*/ - if (edesc->desc.ptr[1].len) + if (from_talitos_ptr_len(&edesc->desc.ptr[1])) unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE); - if (edesc->desc.ptr[2].len) + if (from_talitos_ptr_len(&edesc->desc.ptr[2])) unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); -- cgit v1.2.3 From 90490752eb03ddc1015233034ec26816f0ea0de3 Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:32:01 +0200 Subject: crypto: talitos - enhanced talitos_desc struct for SEC1 This patch enhances the talitos_desc struct with fields for SEC1. SEC1 has only one header field, and has a 'next_desc' field in addition. This mixed descriptor will continue to fit SEC2, and for SEC1 we will recopy hdr value into hdr1 value in talitos_submit() Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu --- drivers/crypto/talitos.h | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index 61a14054aa39..f078da1d387a 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h @@ -37,9 +37,17 @@ /* descriptor pointer entry */ struct talitos_ptr { - __be16 len; /* length */ - u8 j_extent; /* jump to sg link table and/or extent */ - u8 eptr; /* extended address */ + union { + struct { /* SEC2 format */ + __be16 len; /* length */ + u8 j_extent; /* jump to sg link table and/or extent*/ + u8 eptr; /* extended address */ + }; + struct { /* SEC1 format */ + __be16 res; + __be16 len1; /* length */ + }; + }; __be32 ptr; /* address */ }; @@ -53,8 +61,12 @@ static const struct talitos_ptr zero_entry = { /* descriptor */ struct talitos_desc { __be32 hdr; /* header high bits */ - __be32 hdr_lo; /* header low bits */ + union { + __be32 hdr_lo; /* header low bits */ + __be32 hdr1; /* header for SEC1 */ + }; struct talitos_ptr ptr[7]; /* ptr/len pair array */ + __be32 next_desc; /* next descriptor (SEC1) */ }; /** -- cgit v1.2.3 From 5b841a65dcae3ac76dc34e56c948c629de48c1a3 Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:32:03 +0200 Subject: crypto: talitos - add sub-choice in talitos CONFIG for SEC1 This patch adds a CONFIG option to select SEC1, SEC2+ or both. Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 800bf41718e1..8a76a012be61 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -222,6 +222,24 @@ config CRYPTO_DEV_TALITOS To compile this driver as a module, choose M here: the module will be called talitos. +config CRYPTO_DEV_TALITOS1 + bool "SEC1 (SEC 1.0 and SEC Lite 1.2)" + depends on CRYPTO_DEV_TALITOS + depends on PPC_8xx || PPC_82xx + default y + help + Say 'Y' here to use the Freescale Security Engine (SEC) version 1.0 + found on MPC82xx or the Freescale Security Engine (SEC Lite) + version 1.2 found on MPC8xx + +config CRYPTO_DEV_TALITOS2 + bool "SEC2+ (SEC version 2.0 or upper)" + depends on CRYPTO_DEV_TALITOS + default y if !PPC_8xx + help + Say 'Y' here to use the Freescale Security Engine (SEC) + version 2 and following as found on MPC83xx, MPC85xx, etc ... 
+ config CRYPTO_DEV_IXP4XX tristate "Driver for IXP4xx crypto hardware acceleration" depends on ARCH_IXP4XX && IXP4XX_QMGR && IXP4XX_NPE -- cgit v1.2.3 From 21590888490ce2a46ff4703b1503f562f4a59571 Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:32:05 +0200 Subject: crypto: talitos - Add a feature to tag SEC1 We add a new feature in the features field, to mark compatible "fsl,sec1.0" We also define a helper function called has_ftr_sec1() to help functions quickly determine if they are running on SEC1 or SEC2+. When only SEC1 or SEC2 is compiled in, has_ftr_sec1() return trivial corresponding value. If both are compiled in, feature field is checked. Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 3 +++ drivers/crypto/talitos.h | 17 +++++++++++++++++ 2 files changed, 20 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index bca6dedb357d..db950236183d 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -2709,6 +2709,9 @@ static int talitos_probe(struct platform_device *ofdev) TALITOS_FTR_SHA224_HWINIT | TALITOS_FTR_HMAC_OK; + if (of_device_is_compatible(np, "fsl,sec1.0")) + priv->features |= TALITOS_FTR_SEC1; + priv->chan = kzalloc(sizeof(struct talitos_channel) * priv->num_channels, GFP_KERNEL); if (!priv->chan) { diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index f078da1d387a..b0bdb4ec79fc 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h @@ -156,6 +156,23 @@ extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 #define TALITOS_FTR_SHA224_HWINIT 0x00000004 #define TALITOS_FTR_HMAC_OK 0x00000008 +#define TALITOS_FTR_SEC1 0x00000010 + +/* + * If both CONFIG_CRYPTO_DEV_TALITOS1 and CONFIG_CRYPTO_DEV_TALITOS2 are + * defined, we check the features which are set according to the device tree. + * Otherwise, we answer true or false directly + */ +static inline bool has_ftr_sec1(struct talitos_private *priv) +{ +#if defined(CONFIG_CRYPTO_DEV_TALITOS1) && defined(CONFIG_CRYPTO_DEV_TALITOS2) + return priv->features & TALITOS_FTR_SEC1 ? true : false; +#elif defined(CONFIG_CRYPTO_DEV_TALITOS1) + return true; +#else + return false; +#endif +} /* * TALITOS_xxx_LO addresses point to the low data bits (32-63) of the register -- cgit v1.2.3 From 922f9dc8d36651abea51c4f2ff9c08ba4db692ff Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:32:07 +0200 Subject: crypto: talitos - fill in talitos descriptor iaw SEC1 or SEC2+ talitos descriptor is slightly different for SEC1 and SEC2+, so lets the helper function that fills the descriptor take into account the type of SEC. 
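[Editor's note, not part of the patch: a minimal sketch of how the is_sec1-aware helpers changed below are meant to be used when filling one descriptor pointer. All names come from this series; nothing here is new API. The underlying difference is the talitos_ptr layout introduced earlier in the series: SEC2+ keeps len/j_extent/eptr, while SEC1 has a reserved half-word followed by len1 and has neither an extended pointer nor an extent field.]

	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);     /* writes len1 on SEC1, len on SEC2+ */
	to_talitos_ptr(ptr, dma_addr, is_sec1);    /* eptr is only written on SEC2+ */
	to_talitos_ptr_extent_clear(ptr, is_sec1); /* no-op on SEC1 (no j_extent) */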
Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 105 ++++++++++++++++++++++++++++++----------------- 1 file changed, 67 insertions(+), 38 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index db950236183d..678b5280e7a8 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -55,25 +55,38 @@ #include "talitos.h" -static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr) +static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr, + bool is_sec1) { ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr)); - ptr->eptr = upper_32_bits(dma_addr); + if (!is_sec1) + ptr->eptr = upper_32_bits(dma_addr); } -static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned short len) +static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned short len, + bool is_sec1) { - ptr->len = cpu_to_be16(len); + if (is_sec1) { + ptr->res = 0; + ptr->len1 = cpu_to_be16(len); + } else { + ptr->len = cpu_to_be16(len); + } } -static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr) +static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr, + bool is_sec1) { - return be16_to_cpu(ptr->len); + if (is_sec1) + return be16_to_cpu(ptr->len1); + else + return be16_to_cpu(ptr->len); } -static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr) +static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1) { - ptr->j_extent = 0; + if (!is_sec1) + ptr->j_extent = 0; } /* @@ -85,10 +98,12 @@ static void map_single_talitos_ptr(struct device *dev, enum dma_data_direction dir) { dma_addr_t dma_addr = dma_map_single(dev, data, len, dir); + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); - to_talitos_ptr_len(ptr, len); - to_talitos_ptr(ptr, dma_addr); - to_talitos_ptr_extent_clear(ptr); + to_talitos_ptr_len(ptr, len, is_sec1); + to_talitos_ptr(ptr, dma_addr, is_sec1); + to_talitos_ptr_extent_clear(ptr, is_sec1); } /* @@ -98,8 +113,11 @@ static void unmap_single_talitos_ptr(struct device *dev, struct talitos_ptr *ptr, enum dma_data_direction dir) { + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); + dma_unmap_single(dev, be32_to_cpu(ptr->ptr), - from_talitos_ptr_len(ptr), dir); + from_talitos_ptr_len(ptr, is_sec1), dir); } static int reset_channel(struct device *dev, int ch) @@ -922,7 +940,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count, int n_sg = sg_count; while (n_sg--) { - to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg)); + to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg), 0); link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg)); link_tbl_ptr->j_extent = 0; link_tbl_ptr++; @@ -976,7 +994,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off * - sizeof(struct talitos_ptr)); + sizeof(struct talitos_ptr), 0); desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP; /* assoc_nents - 1 entries for assoc, 1 for IV */ @@ -987,7 +1005,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, tbl_ptr += sg_count - 1; tbl_ptr->j_extent = 0; tbl_ptr++; - to_talitos_ptr(tbl_ptr, edesc->iv_dma); + to_talitos_ptr(tbl_ptr, edesc->iv_dma, 0); tbl_ptr->len = cpu_to_be16(ivsize); tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; @@ -996,14 +1014,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request 
*areq, } else { if (areq->assoclen) to_talitos_ptr(&desc->ptr[1], - sg_dma_address(areq->assoc)); + sg_dma_address(areq->assoc), 0); else - to_talitos_ptr(&desc->ptr[1], edesc->iv_dma); + to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, 0); desc->ptr[1].j_extent = 0; } /* cipher iv */ - to_talitos_ptr(&desc->ptr[2], edesc->iv_dma); + to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0); desc->ptr[2].len = cpu_to_be16(ivsize); desc->ptr[2].j_extent = 0; /* Sync needed for the aead_givencrypt case */ @@ -1029,7 +1047,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, edesc->src_chained); if (sg_count == 1) { - to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src)); + to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0); } else { sg_link_tbl_len = cryptlen; @@ -1040,14 +1058,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, &edesc->link_tbl[0]); if (sg_count > 1) { desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; - to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl); + to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl, 0); dma_sync_single_for_device(dev, edesc->dma_link_tbl, edesc->dma_len, DMA_BIDIRECTIONAL); } else { /* Only one segment now, so no link tbl needed */ to_talitos_ptr(&desc->ptr[4], - sg_dma_address(areq->src)); + sg_dma_address(areq->src), 0); } } @@ -1061,13 +1079,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, DMA_FROM_DEVICE, edesc->dst_chained); if (sg_count == 1) { - to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst)); + to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0); } else { int tbl_off = edesc->src_nents + 1; struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl + - tbl_off * sizeof(struct talitos_ptr)); + tbl_off * sizeof(struct talitos_ptr), 0); sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, tbl_ptr); @@ -1082,7 +1100,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + (tbl_off + edesc->dst_nents + 1 + edesc->assoc_nents) * - sizeof(struct talitos_ptr)); + sizeof(struct talitos_ptr), 0); desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP; dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, edesc->dma_len, DMA_BIDIRECTIONAL); @@ -1384,27 +1402,29 @@ int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src, enum dma_data_direction dir, struct talitos_ptr *ptr) { int sg_count; + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); - to_talitos_ptr_len(ptr, len); - to_talitos_ptr_extent_clear(ptr); + to_talitos_ptr_len(ptr, len, is_sec1); + to_talitos_ptr_extent_clear(ptr, is_sec1); sg_count = talitos_map_sg(dev, src, edesc->src_nents ? 
: 1, dir, edesc->src_chained); if (sg_count == 1) { - to_talitos_ptr(ptr, sg_dma_address(src)); + to_talitos_ptr(ptr, sg_dma_address(src), is_sec1); } else { sg_count = sg_to_link_tbl(src, sg_count, len, &edesc->link_tbl[0]); if (sg_count > 1) { - to_talitos_ptr(ptr, edesc->dma_link_tbl); + to_talitos_ptr(ptr, edesc->dma_link_tbl, 0); ptr->j_extent |= DESC_PTR_LNKTBL_JUMP; dma_sync_single_for_device(dev, edesc->dma_link_tbl, edesc->dma_len, DMA_BIDIRECTIONAL); } else { /* Only one segment now, so no link tbl needed */ - to_talitos_ptr(ptr, sg_dma_address(src)); + to_talitos_ptr(ptr, sg_dma_address(src), is_sec1); } } return sg_count; @@ -1415,22 +1435,25 @@ void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst, enum dma_data_direction dir, struct talitos_ptr *ptr, int sg_count) { - to_talitos_ptr_len(ptr, len); - to_talitos_ptr_extent_clear(ptr); + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); + + to_talitos_ptr_len(ptr, len, is_sec1); + to_talitos_ptr_extent_clear(ptr, is_sec1); if (dir != DMA_NONE) sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? : 1, dir, edesc->dst_chained); if (sg_count == 1) { - to_talitos_ptr(ptr, sg_dma_address(dst)); + to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1); } else { struct talitos_ptr *link_tbl_ptr = &edesc->link_tbl[edesc->src_nents + 1]; to_talitos_ptr(ptr, edesc->dma_link_tbl + (edesc->src_nents + 1) * - sizeof(struct talitos_ptr)); + sizeof(struct talitos_ptr), 0); ptr->j_extent |= DESC_PTR_LNKTBL_JUMP; sg_count = sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr); dma_sync_single_for_device(dev, edesc->dma_link_tbl, @@ -1451,14 +1474,16 @@ static int common_nonsnoop(struct talitos_edesc *edesc, unsigned int cryptlen = areq->nbytes; unsigned int ivsize = crypto_ablkcipher_ivsize(cipher); int sg_count, ret; + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); /* first DWORD empty */ desc->ptr[0] = zero_entry; /* cipher iv */ - to_talitos_ptr(&desc->ptr[1], edesc->iv_dma); - to_talitos_ptr_len(&desc->ptr[1], ivsize); - to_talitos_ptr_extent_clear(&desc->ptr[1]); + to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1); + to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1); + to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1); /* cipher key */ map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, @@ -1543,17 +1568,19 @@ static void common_nonsnoop_hash_unmap(struct device *dev, struct ahash_request *areq) { struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc); /* When using hashctx-in, must unmap it. 
*/ - if (from_talitos_ptr_len(&edesc->desc.ptr[1])) + if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1)) unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE); - if (from_talitos_ptr_len(&edesc->desc.ptr[2])) + if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1)) unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); @@ -1596,6 +1623,8 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, struct device *dev = ctx->dev; struct talitos_desc *desc = &edesc->desc; int ret; + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); /* first DWORD empty */ desc->ptr[0] = zero_entry; -- cgit v1.2.3 From 7d607c6a71f7a4905831fc2b1636e080533ab2db Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:32:09 +0200 Subject: crypto: talitos - adaptation of talitos_submit() for SEC1 SEC1 descriptor is a bit different to SEC2+ descriptor. talitos_submit() will have to copy hdr field into hdr1 field and send the descriptor starting at hdr1 up to next_desc. For SEC2, it remains unchanged and next_desc is just ignored. Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 23 +++++++++++++++++++---- drivers/crypto/talitos.h | 2 ++ 2 files changed, 21 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 678b5280e7a8..e6ea6510d8f2 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -236,6 +236,7 @@ int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, struct talitos_request *request; unsigned long flags; int head; + bool is_sec1 = has_ftr_sec1(priv); spin_lock_irqsave(&priv->chan[ch].head_lock, flags); @@ -249,8 +250,17 @@ int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, request = &priv->chan[ch].fifo[head]; /* map descriptor and save caller data */ - request->dma_desc = dma_map_single(dev, desc, sizeof(*desc), - DMA_BIDIRECTIONAL); + if (is_sec1) { + desc->hdr1 = desc->hdr; + desc->next_desc = 0; + request->dma_desc = dma_map_single(dev, &desc->hdr1, + TALITOS_DESC_SIZE, + DMA_BIDIRECTIONAL); + } else { + request->dma_desc = dma_map_single(dev, desc, + TALITOS_DESC_SIZE, + DMA_BIDIRECTIONAL); + } request->callback = callback; request->context = context; @@ -282,16 +292,21 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch) struct talitos_request *request, saved_req; unsigned long flags; int tail, status; + bool is_sec1 = has_ftr_sec1(priv); spin_lock_irqsave(&priv->chan[ch].tail_lock, flags); tail = priv->chan[ch].tail; while (priv->chan[ch].fifo[tail].desc) { + __be32 hdr; + request = &priv->chan[ch].fifo[tail]; /* descriptors with their done bits set don't get the error */ rmb(); - if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE) + hdr = is_sec1 ? 
request->desc->hdr1 : request->desc->hdr; + + if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE) status = 0; else if (!error) @@ -300,7 +315,7 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch) status = error; dma_unmap_single(dev, request->dma_desc, - sizeof(struct talitos_desc), + TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL); /* copy entries so we can call callback outside lock */ diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index b0bdb4ec79fc..f827c04a6d1a 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h @@ -69,6 +69,8 @@ struct talitos_desc { __be32 next_desc; /* next descriptor (SEC1) */ }; +#define TALITOS_DESC_SIZE (sizeof(struct talitos_desc) - sizeof(__be32)) + /** * talitos_request - descriptor submission request * @desc: descriptor pointer (kernel virtual) -- cgit v1.2.3 From 5fa7fa147b1572ae724819631ef442e1c9570f9a Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:32:11 +0200 Subject: crypto: talitos - base address for Execution Units SEC 1.0, 1.2 and 2.x+ have different EU base addresses, so we need to define pointers for each EU in the driver private data structure. The proper address is set by the probe function depending on the SEC type, in order to provide access to the proper address. Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 83 ++++++++++++++++++++++++++++++++---------------- drivers/crypto/talitos.h | 72 +++++++++++++++++++++++++---------------- 2 files changed, 100 insertions(+), 55 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index e6ea6510d8f2..6d77699a821b 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -208,7 +208,7 @@ static int init_device(struct device *dev) /* disable integrity check error interrupts (use writeback instead) */ if (priv->features & TALITOS_FTR_HW_AUTH_CHECK) - setbits32(priv->reg + TALITOS_MDEUICR_LO, + setbits32(priv->reg_mdeu + TALITOS_EUICR_LO, TALITOS_MDEUICR_LO_ICE); return 0; @@ -424,44 +424,44 @@ static void report_eu_error(struct device *dev, int ch, u32 desc_hdr) switch (desc_hdr & DESC_HDR_SEL0_MASK) { case DESC_HDR_SEL0_AFEU: dev_err(dev, "AFEUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_AFEUISR), - in_be32(priv->reg + TALITOS_AFEUISR_LO)); + in_be32(priv->reg_afeu + TALITOS_EUISR), + in_be32(priv->reg_afeu + TALITOS_EUISR_LO)); break; case DESC_HDR_SEL0_DEU: dev_err(dev, "DEUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_DEUISR), - in_be32(priv->reg + TALITOS_DEUISR_LO)); + in_be32(priv->reg_deu + TALITOS_EUISR), + in_be32(priv->reg_deu + TALITOS_EUISR_LO)); break; case DESC_HDR_SEL0_MDEUA: case DESC_HDR_SEL0_MDEUB: dev_err(dev, "MDEUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_MDEUISR), - in_be32(priv->reg + TALITOS_MDEUISR_LO)); + in_be32(priv->reg_mdeu + TALITOS_EUISR), + in_be32(priv->reg_mdeu + TALITOS_EUISR_LO)); break; case DESC_HDR_SEL0_RNG: dev_err(dev, "RNGUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_RNGUISR), - in_be32(priv->reg + TALITOS_RNGUISR_LO)); + in_be32(priv->reg_rngu + TALITOS_ISR), + in_be32(priv->reg_rngu + TALITOS_ISR_LO)); break; case DESC_HDR_SEL0_PKEU: dev_err(dev, "PKEUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_PKEUISR), - in_be32(priv->reg + TALITOS_PKEUISR_LO)); + in_be32(priv->reg_pkeu + TALITOS_EUISR), + in_be32(priv->reg_pkeu + TALITOS_EUISR_LO)); break; case DESC_HDR_SEL0_AESU: dev_err(dev, "AESUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_AESUISR), - 
in_be32(priv->reg + TALITOS_AESUISR_LO)); + in_be32(priv->reg_aesu + TALITOS_EUISR), + in_be32(priv->reg_aesu + TALITOS_EUISR_LO)); break; case DESC_HDR_SEL0_CRCU: dev_err(dev, "CRCUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_CRCUISR), - in_be32(priv->reg + TALITOS_CRCUISR_LO)); + in_be32(priv->reg_crcu + TALITOS_EUISR), + in_be32(priv->reg_crcu + TALITOS_EUISR_LO)); break; case DESC_HDR_SEL0_KEU: dev_err(dev, "KEUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_KEUISR), - in_be32(priv->reg + TALITOS_KEUISR_LO)); + in_be32(priv->reg_pkeu + TALITOS_EUISR), + in_be32(priv->reg_pkeu + TALITOS_EUISR_LO)); break; } @@ -469,13 +469,13 @@ static void report_eu_error(struct device *dev, int ch, u32 desc_hdr) case DESC_HDR_SEL1_MDEUA: case DESC_HDR_SEL1_MDEUB: dev_err(dev, "MDEUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_MDEUISR), - in_be32(priv->reg + TALITOS_MDEUISR_LO)); + in_be32(priv->reg_mdeu + TALITOS_EUISR), + in_be32(priv->reg_mdeu + TALITOS_EUISR_LO)); break; case DESC_HDR_SEL1_CRCU: dev_err(dev, "CRCUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_CRCUISR), - in_be32(priv->reg + TALITOS_CRCUISR_LO)); + in_be32(priv->reg_crcu + TALITOS_EUISR), + in_be32(priv->reg_crcu + TALITOS_EUISR_LO)); break; } @@ -614,7 +614,7 @@ static int talitos_rng_data_present(struct hwrng *rng, int wait) int i; for (i = 0; i < 20; i++) { - ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) & + ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) & TALITOS_RNGUSR_LO_OFL; if (ofl || !wait) break; @@ -630,8 +630,8 @@ static int talitos_rng_data_read(struct hwrng *rng, u32 *data) struct talitos_private *priv = dev_get_drvdata(dev); /* rng fifo requires 64-bit accesses */ - *data = in_be32(priv->reg + TALITOS_RNGU_FIFO); - *data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO); + *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO); + *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO); return sizeof(u32); } @@ -642,8 +642,9 @@ static int talitos_rng_init(struct hwrng *rng) struct talitos_private *priv = dev_get_drvdata(dev); unsigned int timeout = TALITOS_TIMEOUT; - setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR); - while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD) + setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR); + while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO) + & TALITOS_RNGUSR_LO_RD) && --timeout) cpu_relax(); if (timeout == 0) { @@ -652,7 +653,7 @@ static int talitos_rng_init(struct hwrng *rng) } /* start generating */ - setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0); + setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0); return 0; } @@ -2687,6 +2688,7 @@ static int talitos_probe(struct platform_device *ofdev) struct talitos_private *priv; const unsigned int *prop; int i, err; + int stride; priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL); if (!priv) @@ -2756,6 +2758,31 @@ static int talitos_probe(struct platform_device *ofdev) if (of_device_is_compatible(np, "fsl,sec1.0")) priv->features |= TALITOS_FTR_SEC1; + if (of_device_is_compatible(np, "fsl,sec1.2")) { + priv->reg_deu = priv->reg + TALITOS12_DEU; + priv->reg_aesu = priv->reg + TALITOS12_AESU; + priv->reg_mdeu = priv->reg + TALITOS12_MDEU; + stride = TALITOS1_CH_STRIDE; + } else if (of_device_is_compatible(np, "fsl,sec1.0")) { + priv->reg_deu = priv->reg + TALITOS10_DEU; + priv->reg_aesu = priv->reg + TALITOS10_AESU; + priv->reg_mdeu = priv->reg + TALITOS10_MDEU; + priv->reg_afeu = priv->reg + TALITOS10_AFEU; + priv->reg_rngu = priv->reg + TALITOS10_RNGU; + priv->reg_pkeu = priv->reg + 
TALITOS10_PKEU; + stride = TALITOS1_CH_STRIDE; + } else { + priv->reg_deu = priv->reg + TALITOS2_DEU; + priv->reg_aesu = priv->reg + TALITOS2_AESU; + priv->reg_mdeu = priv->reg + TALITOS2_MDEU; + priv->reg_afeu = priv->reg + TALITOS2_AFEU; + priv->reg_rngu = priv->reg + TALITOS2_RNGU; + priv->reg_pkeu = priv->reg + TALITOS2_PKEU; + priv->reg_keu = priv->reg + TALITOS2_KEU; + priv->reg_crcu = priv->reg + TALITOS2_CRCU; + stride = TALITOS2_CH_STRIDE; + } + priv->chan = kzalloc(sizeof(struct talitos_channel) * priv->num_channels, GFP_KERNEL); if (!priv->chan) { @@ -2767,7 +2794,7 @@ static int talitos_probe(struct platform_device *ofdev) priv->fifo_len = roundup_pow_of_two(priv->chfifo_len); for (i = 0; i < priv->num_channels; i++) { - priv->chan[i].reg = priv->reg + TALITOS_CH_STRIDE * (i + 1); + priv->chan[i].reg = priv->reg + stride * (i + 1); if (!priv->irq[1] || !(i & 1)) priv->chan[i].reg += TALITOS_CH_BASE_OFFSET; diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index f827c04a6d1a..4faa3b6fc6db 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h @@ -111,6 +111,14 @@ struct talitos_private { struct device *dev; struct platform_device *ofdev; void __iomem *reg; + void __iomem *reg_deu; + void __iomem *reg_aesu; + void __iomem *reg_mdeu; + void __iomem *reg_afeu; + void __iomem *reg_rngu; + void __iomem *reg_pkeu; + void __iomem *reg_keu; + void __iomem *reg_crcu; int irq[2]; /* SEC global registers lock */ @@ -206,7 +214,8 @@ static inline bool has_ftr_sec1(struct talitos_private *priv) /* channel register address stride */ #define TALITOS_CH_BASE_OFFSET 0x1000 /* default channel map base */ -#define TALITOS_CH_STRIDE 0x100 +#define TALITOS1_CH_STRIDE 0x1000 +#define TALITOS2_CH_STRIDE 0x100 /* channel configuration register */ #define TALITOS_CCCR 0x8 @@ -255,37 +264,46 @@ static inline bool has_ftr_sec1(struct talitos_private *priv) #define TALITOS_SCATTER 0xe0 #define TALITOS_SCATTER_LO 0xe4 +/* execution unit registers base */ +#define TALITOS2_DEU 0x2000 +#define TALITOS2_AESU 0x4000 +#define TALITOS2_MDEU 0x6000 +#define TALITOS2_AFEU 0x8000 +#define TALITOS2_RNGU 0xa000 +#define TALITOS2_PKEU 0xc000 +#define TALITOS2_KEU 0xe000 +#define TALITOS2_CRCU 0xf000 + +#define TALITOS12_AESU 0x4000 +#define TALITOS12_DEU 0x5000 +#define TALITOS12_MDEU 0x6000 + +#define TALITOS10_AFEU 0x8000 +#define TALITOS10_DEU 0xa000 +#define TALITOS10_MDEU 0xc000 +#define TALITOS10_RNGU 0xe000 +#define TALITOS10_PKEU 0x10000 +#define TALITOS10_AESU 0x12000 + /* execution unit interrupt status registers */ -#define TALITOS_DEUISR 0x2030 /* DES unit */ -#define TALITOS_DEUISR_LO 0x2034 -#define TALITOS_AESUISR 0x4030 /* AES unit */ -#define TALITOS_AESUISR_LO 0x4034 -#define TALITOS_MDEUISR 0x6030 /* message digest unit */ -#define TALITOS_MDEUISR_LO 0x6034 -#define TALITOS_MDEUICR 0x6038 /* interrupt control */ -#define TALITOS_MDEUICR_LO 0x603c +#define TALITOS_EUDSR 0x10 /* data size */ +#define TALITOS_EUDSR_LO 0x14 +#define TALITOS_EURCR 0x18 /* reset control*/ +#define TALITOS_EURCR_LO 0x1c +#define TALITOS_EUSR 0x28 /* rng status */ +#define TALITOS_EUSR_LO 0x2c +#define TALITOS_EUISR 0x30 +#define TALITOS_EUISR_LO 0x34 +#define TALITOS_EUICR 0x38 /* int. 
control */ +#define TALITOS_EUICR_LO 0x3c +#define TALITOS_EU_FIFO 0x800 /* output FIFO */ +#define TALITOS_EU_FIFO_LO 0x804 /* output FIFO */ +/* message digest unit */ #define TALITOS_MDEUICR_LO_ICE 0x4000 /* integrity check IRQ enable */ -#define TALITOS_AFEUISR 0x8030 /* arc4 unit */ -#define TALITOS_AFEUISR_LO 0x8034 -#define TALITOS_RNGUISR 0xa030 /* random number unit */ -#define TALITOS_RNGUISR_LO 0xa034 -#define TALITOS_RNGUSR 0xa028 /* rng status */ -#define TALITOS_RNGUSR_LO 0xa02c +/* random number unit */ #define TALITOS_RNGUSR_LO_RD 0x1 /* reset done */ #define TALITOS_RNGUSR_LO_OFL 0xff0000/* output FIFO length */ -#define TALITOS_RNGUDSR 0xa010 /* data size */ -#define TALITOS_RNGUDSR_LO 0xa014 -#define TALITOS_RNGU_FIFO 0xa800 /* output FIFO */ -#define TALITOS_RNGU_FIFO_LO 0xa804 /* output FIFO */ -#define TALITOS_RNGURCR 0xa018 /* reset control */ -#define TALITOS_RNGURCR_LO 0xa01c #define TALITOS_RNGURCR_LO_SR 0x1 /* software reset */ -#define TALITOS_PKEUISR 0xc030 /* public key unit */ -#define TALITOS_PKEUISR_LO 0xc034 -#define TALITOS_KEUISR 0xe030 /* kasumi unit */ -#define TALITOS_KEUISR_LO 0xe034 -#define TALITOS_CRCUISR 0xf030 /* cyclic redundancy check unit*/ -#define TALITOS_CRCUISR_LO 0xf034 #define TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 0x28 #define TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512 0x48 -- cgit v1.2.3 From dd3c0987f5426d2df4a0c92de82dac65874bb9a4 Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:32:13 +0200 Subject: crypto: talitos - adapt interrupts and reset functions to SEC1 This patch adapts the interrupts handling and reset function for SEC1. On SEC1, registers are almost similar to SEC2+, but bits are sometimes located at different places. So we need to define TALITOS1 and TALITOS2 versions of some fields, and manage according to whether it is SEC1 or SEC2. On SEC1, only one interrupt vector is dedicated to the SEC, so only interrupt_4ch is needed. On SEC1, interrupts are enabled by clearing related bits in IMR, while on SEC2, interrupts are enabled by seting the bits in IMR. SEC1 also performs parity verification in the DES Unit. We have to disable this feature because the test vectors provided in the kernel have parity errors. In reset functions, only SEC2 supports continuation after error. For SEC1, we have to reset in all cases. For errors handling, SEC2+ names have been kept, but displayed text have been amended to reflect exact meaning on SEC1. 
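[Editor's note, not part of the patch: a worked example of the SEC1 interrupt bit layout that the new masks below encode. ISR1_FORMAT(x) replicates x at bit positions 28 and 16, so with the done pattern 0x5 and the error pattern 0xa the per-channel bits are

	ch0 done/err = bits 28/29	ch1 done/err = bits 30/31
	ch2 done/err = bits 16/17	ch3 done/err = bits 18/19

which is why the SEC1 done tasklet tests 0x10000000, 0x40000000, 0x00010000 and 0x00040000, and why talitos_error() picks the SEC1 error bit with 1 << (29 + (ch & 1) * 2 - (ch & 2) * 6). SEC2+ keeps the historical low-byte layout through ISR2_FORMAT(x) = (x << 4) | x, so TALITOS2_ISR_4CHDONE/4CHERR still equal the old 0x55/0xaa values.]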
Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 227 +++++++++++++++++++++++++++++++++++------------ drivers/crypto/talitos.h | 39 +++++--- 2 files changed, 199 insertions(+), 67 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 6d77699a821b..12654056ca53 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -124,12 +124,23 @@ static int reset_channel(struct device *dev, int ch) { struct talitos_private *priv = dev_get_drvdata(dev); unsigned int timeout = TALITOS_TIMEOUT; + bool is_sec1 = has_ftr_sec1(priv); - setbits32(priv->chan[ch].reg + TALITOS_CCCR, TALITOS_CCCR_RESET); + if (is_sec1) { + setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, + TALITOS1_CCCR_LO_RESET); - while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & TALITOS_CCCR_RESET) - && --timeout) - cpu_relax(); + while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) & + TALITOS1_CCCR_LO_RESET) && --timeout) + cpu_relax(); + } else { + setbits32(priv->chan[ch].reg + TALITOS_CCCR, + TALITOS2_CCCR_RESET); + + while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & + TALITOS2_CCCR_RESET) && --timeout) + cpu_relax(); + } if (timeout == 0) { dev_err(dev, "failed to reset channel %d\n", ch); @@ -152,11 +163,12 @@ static int reset_device(struct device *dev) { struct talitos_private *priv = dev_get_drvdata(dev); unsigned int timeout = TALITOS_TIMEOUT; - u32 mcr = TALITOS_MCR_SWR; + bool is_sec1 = has_ftr_sec1(priv); + u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR; setbits32(priv->reg + TALITOS_MCR, mcr); - while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR) + while ((in_be32(priv->reg + TALITOS_MCR) & mcr) && --timeout) cpu_relax(); @@ -180,6 +192,7 @@ static int init_device(struct device *dev) { struct talitos_private *priv = dev_get_drvdata(dev); int ch, err; + bool is_sec1 = has_ftr_sec1(priv); /* * Master reset @@ -203,8 +216,15 @@ static int init_device(struct device *dev) } /* enable channel done and error interrupts */ - setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT); - setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT); + if (is_sec1) { + clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT); + clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); + /* disable parity error check in DEU (erroneous? test vect.) 
*/ + setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE); + } else { + setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT); + setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); + } /* disable integrity check error interrupts (use writeback instead) */ if (priv->features & TALITOS_FTR_HW_AUTH_CHECK) @@ -349,8 +369,37 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch) /* * process completed requests for channels that have done status */ -#define DEF_TALITOS_DONE(name, ch_done_mask) \ -static void talitos_done_##name(unsigned long data) \ +#define DEF_TALITOS1_DONE(name, ch_done_mask) \ +static void talitos1_done_##name(unsigned long data) \ +{ \ + struct device *dev = (struct device *)data; \ + struct talitos_private *priv = dev_get_drvdata(dev); \ + unsigned long flags; \ + \ + if (ch_done_mask & 0x10000000) \ + flush_channel(dev, 0, 0, 0); \ + if (priv->num_channels == 1) \ + goto out; \ + if (ch_done_mask & 0x40000000) \ + flush_channel(dev, 1, 0, 0); \ + if (ch_done_mask & 0x00010000) \ + flush_channel(dev, 2, 0, 0); \ + if (ch_done_mask & 0x00040000) \ + flush_channel(dev, 3, 0, 0); \ + \ +out: \ + /* At this point, all completed channels have been processed */ \ + /* Unmask done interrupts for channels completed later on. */ \ + spin_lock_irqsave(&priv->reg_lock, flags); \ + clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \ + clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); \ + spin_unlock_irqrestore(&priv->reg_lock, flags); \ +} + +DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE) + +#define DEF_TALITOS2_DONE(name, ch_done_mask) \ +static void talitos2_done_##name(unsigned long data) \ { \ struct device *dev = (struct device *)data; \ struct talitos_private *priv = dev_get_drvdata(dev); \ @@ -372,12 +421,13 @@ out: \ /* Unmask done interrupts for channels completed later on. */ \ spin_lock_irqsave(&priv->reg_lock, flags); \ setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \ - setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT); \ + setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); \ spin_unlock_irqrestore(&priv->reg_lock, flags); \ } -DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE) -DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE) -DEF_TALITOS_DONE(ch1_3, TALITOS_ISR_CH_1_3_DONE) + +DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE) +DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE) +DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE) /* * locate current (offending) descriptor @@ -492,13 +542,21 @@ static void talitos_error(struct device *dev, u32 isr, u32 isr_lo) { struct talitos_private *priv = dev_get_drvdata(dev); unsigned int timeout = TALITOS_TIMEOUT; - int ch, error, reset_dev = 0, reset_ch = 0; + int ch, error, reset_dev = 0; u32 v, v_lo; + bool is_sec1 = has_ftr_sec1(priv); + int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */ for (ch = 0; ch < priv->num_channels; ch++) { /* skip channels without errors */ - if (!(isr & (1 << (ch * 2 + 1)))) - continue; + if (is_sec1) { + /* bits 29, 31, 17, 19 */ + if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6)))) + continue; + } else { + if (!(isr & (1 << (ch * 2 + 1)))) + continue; + } error = -EINVAL; @@ -518,23 +576,28 @@ static void talitos_error(struct device *dev, u32 isr, u32 isr_lo) if (v_lo & TALITOS_CCPSR_LO_MDTE) dev_err(dev, "master data transfer error\n"); if (v_lo & TALITOS_CCPSR_LO_SGDLZ) - dev_err(dev, "s/g data length zero error\n"); + dev_err(dev, is_sec1 ? 
"pointeur not complete error\n" + : "s/g data length zero error\n"); if (v_lo & TALITOS_CCPSR_LO_FPZ) - dev_err(dev, "fetch pointer zero error\n"); + dev_err(dev, is_sec1 ? "parity error\n" + : "fetch pointer zero error\n"); if (v_lo & TALITOS_CCPSR_LO_IDH) dev_err(dev, "illegal descriptor header error\n"); if (v_lo & TALITOS_CCPSR_LO_IEU) - dev_err(dev, "invalid execution unit error\n"); + dev_err(dev, is_sec1 ? "static assignment error\n" + : "invalid exec unit error\n"); if (v_lo & TALITOS_CCPSR_LO_EU) report_eu_error(dev, ch, current_desc_hdr(dev, ch)); - if (v_lo & TALITOS_CCPSR_LO_GB) - dev_err(dev, "gather boundary error\n"); - if (v_lo & TALITOS_CCPSR_LO_GRL) - dev_err(dev, "gather return/length error\n"); - if (v_lo & TALITOS_CCPSR_LO_SB) - dev_err(dev, "scatter boundary error\n"); - if (v_lo & TALITOS_CCPSR_LO_SRL) - dev_err(dev, "scatter return/length error\n"); + if (!is_sec1) { + if (v_lo & TALITOS_CCPSR_LO_GB) + dev_err(dev, "gather boundary error\n"); + if (v_lo & TALITOS_CCPSR_LO_GRL) + dev_err(dev, "gather return/length error\n"); + if (v_lo & TALITOS_CCPSR_LO_SB) + dev_err(dev, "scatter boundary error\n"); + if (v_lo & TALITOS_CCPSR_LO_SRL) + dev_err(dev, "scatter return/length error\n"); + } flush_channel(dev, ch, error, reset_ch); @@ -542,10 +605,10 @@ static void talitos_error(struct device *dev, u32 isr, u32 isr_lo) reset_channel(dev, ch); } else { setbits32(priv->chan[ch].reg + TALITOS_CCCR, - TALITOS_CCCR_CONT); + TALITOS2_CCCR_CONT); setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0); while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & - TALITOS_CCCR_CONT) && --timeout) + TALITOS2_CCCR_CONT) && --timeout) cpu_relax(); if (timeout == 0) { dev_err(dev, "failed to restart channel %d\n", @@ -554,9 +617,14 @@ static void talitos_error(struct device *dev, u32 isr, u32 isr_lo) } } } - if (reset_dev || isr & ~TALITOS_ISR_4CHERR || isr_lo) { - dev_err(dev, "done overflow, internal time out, or rngu error: " - "ISR 0x%08x_%08x\n", isr, isr_lo); + if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) || + (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) { + if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR)) + dev_err(dev, "TEA error: ISR 0x%08x_%08x\n", + isr, isr_lo); + else + dev_err(dev, "done overflow, internal time out, or " + "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo); /* purge request queues */ for (ch = 0; ch < priv->num_channels; ch++) @@ -567,8 +635,43 @@ static void talitos_error(struct device *dev, u32 isr, u32 isr_lo) } } -#define DEF_TALITOS_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \ -static irqreturn_t talitos_interrupt_##name(int irq, void *data) \ +#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \ +static irqreturn_t talitos1_interrupt_##name(int irq, void *data) \ +{ \ + struct device *dev = data; \ + struct talitos_private *priv = dev_get_drvdata(dev); \ + u32 isr, isr_lo; \ + unsigned long flags; \ + \ + spin_lock_irqsave(&priv->reg_lock, flags); \ + isr = in_be32(priv->reg + TALITOS_ISR); \ + isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \ + /* Acknowledge interrupt */ \ + out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \ + out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \ + \ + if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \ + spin_unlock_irqrestore(&priv->reg_lock, flags); \ + talitos_error(dev, isr & ch_err_mask, isr_lo); \ + } \ + else { \ + if (likely(isr & ch_done_mask)) { \ + /* mask further done interrupts. 
*/ \ + setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \ + /* done_task will unmask done interrupts at exit */ \ + tasklet_schedule(&priv->done_task[tlet]); \ + } \ + spin_unlock_irqrestore(&priv->reg_lock, flags); \ + } \ + \ + return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \ + IRQ_NONE; \ +} + +DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0) + +#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \ +static irqreturn_t talitos2_interrupt_##name(int irq, void *data) \ { \ struct device *dev = data; \ struct talitos_private *priv = dev_get_drvdata(dev); \ @@ -599,9 +702,12 @@ static irqreturn_t talitos_interrupt_##name(int irq, void *data) \ return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \ IRQ_NONE; \ } -DEF_TALITOS_INTERRUPT(4ch, TALITOS_ISR_4CHDONE, TALITOS_ISR_4CHERR, 0) -DEF_TALITOS_INTERRUPT(ch0_2, TALITOS_ISR_CH_0_2_DONE, TALITOS_ISR_CH_0_2_ERR, 0) -DEF_TALITOS_INTERRUPT(ch1_3, TALITOS_ISR_CH_1_3_DONE, TALITOS_ISR_CH_1_3_ERR, 1) + +DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0) +DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR, + 0) +DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR, + 1) /* * hwrng @@ -2639,29 +2745,35 @@ static int talitos_probe_irq(struct platform_device *ofdev) struct device_node *np = ofdev->dev.of_node; struct talitos_private *priv = dev_get_drvdata(dev); int err; + bool is_sec1 = has_ftr_sec1(priv); priv->irq[0] = irq_of_parse_and_map(np, 0); if (!priv->irq[0]) { dev_err(dev, "failed to map irq\n"); return -EINVAL; } + if (is_sec1) { + err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0, + dev_driver_string(dev), dev); + goto primary_out; + } priv->irq[1] = irq_of_parse_and_map(np, 1); /* get the primary irq line */ if (!priv->irq[1]) { - err = request_irq(priv->irq[0], talitos_interrupt_4ch, 0, + err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0, dev_driver_string(dev), dev); goto primary_out; } - err = request_irq(priv->irq[0], talitos_interrupt_ch0_2, 0, + err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0, dev_driver_string(dev), dev); if (err) goto primary_out; /* get the secondary irq line */ - err = request_irq(priv->irq[1], talitos_interrupt_ch1_3, 0, + err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0, dev_driver_string(dev), dev); if (err) { dev_err(dev, "failed to request secondary irq\n"); @@ -2702,20 +2814,6 @@ static int talitos_probe(struct platform_device *ofdev) spin_lock_init(&priv->reg_lock); - err = talitos_probe_irq(ofdev); - if (err) - goto err_out; - - if (!priv->irq[1]) { - tasklet_init(&priv->done_task[0], talitos_done_4ch, - (unsigned long)dev); - } else { - tasklet_init(&priv->done_task[0], talitos_done_ch0_2, - (unsigned long)dev); - tasklet_init(&priv->done_task[1], talitos_done_ch1_3, - (unsigned long)dev); - } - priv->reg = of_iomap(np, 0); if (!priv->reg) { dev_err(dev, "failed to of_iomap\n"); @@ -2783,6 +2881,25 @@ static int talitos_probe(struct platform_device *ofdev) stride = TALITOS2_CH_STRIDE; } + err = talitos_probe_irq(ofdev); + if (err) + goto err_out; + + if (of_device_is_compatible(np, "fsl,sec1.0")) { + tasklet_init(&priv->done_task[0], talitos1_done_4ch, + (unsigned long)dev); + } else { + if (!priv->irq[1]) { + tasklet_init(&priv->done_task[0], talitos2_done_4ch, + (unsigned long)dev); + } else { + tasklet_init(&priv->done_task[0], talitos2_done_ch0_2, + (unsigned long)dev); + 
tasklet_init(&priv->done_task[1], talitos2_done_ch1_3, + (unsigned long)dev); + } + } + priv->chan = kzalloc(sizeof(struct talitos_channel) * priv->num_channels, GFP_KERNEL); if (!priv->chan) { diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index 4faa3b6fc6db..9507c4f620c0 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h @@ -188,26 +188,38 @@ static inline bool has_ftr_sec1(struct talitos_private *priv) * TALITOS_xxx_LO addresses point to the low data bits (32-63) of the register */ +#define ISR1_FORMAT(x) (((x) << 28) | ((x) << 16)) +#define ISR2_FORMAT(x) (((x) << 4) | (x)) + /* global register offset addresses */ #define TALITOS_MCR 0x1030 /* master control register */ #define TALITOS_MCR_RCA0 (1 << 15) /* remap channel 0 */ #define TALITOS_MCR_RCA1 (1 << 14) /* remap channel 1 */ #define TALITOS_MCR_RCA2 (1 << 13) /* remap channel 2 */ #define TALITOS_MCR_RCA3 (1 << 12) /* remap channel 3 */ -#define TALITOS_MCR_SWR 0x1 /* s/w reset */ +#define TALITOS1_MCR_SWR 0x1000000 /* s/w reset */ +#define TALITOS2_MCR_SWR 0x1 /* s/w reset */ #define TALITOS_MCR_LO 0x1034 #define TALITOS_IMR 0x1008 /* interrupt mask register */ -#define TALITOS_IMR_INIT 0x100ff /* enable channel IRQs */ -#define TALITOS_IMR_DONE 0x00055 /* done IRQs */ +/* enable channel IRQs */ +#define TALITOS1_IMR_INIT ISR1_FORMAT(0xf) +#define TALITOS1_IMR_DONE ISR1_FORMAT(0x5) /* done IRQs */ +/* enable channel IRQs */ +#define TALITOS2_IMR_INIT (ISR2_FORMAT(0xf) | 0x10000) +#define TALITOS2_IMR_DONE ISR1_FORMAT(0x5) /* done IRQs */ #define TALITOS_IMR_LO 0x100C -#define TALITOS_IMR_LO_INIT 0x20000 /* allow RNGU error IRQs */ +#define TALITOS1_IMR_LO_INIT 0x2000000 /* allow RNGU error IRQs */ +#define TALITOS2_IMR_LO_INIT 0x20000 /* allow RNGU error IRQs */ #define TALITOS_ISR 0x1010 /* interrupt status register */ -#define TALITOS_ISR_4CHERR 0xaa /* 4 channel errors mask */ -#define TALITOS_ISR_4CHDONE 0x55 /* 4 channel done mask */ -#define TALITOS_ISR_CH_0_2_ERR 0x22 /* channels 0, 2 errors mask */ -#define TALITOS_ISR_CH_0_2_DONE 0x11 /* channels 0, 2 done mask */ -#define TALITOS_ISR_CH_1_3_ERR 0x88 /* channels 1, 3 errors mask */ -#define TALITOS_ISR_CH_1_3_DONE 0x44 /* channels 1, 3 done mask */ +#define TALITOS1_ISR_4CHERR ISR1_FORMAT(0xa) /* 4 ch errors mask */ +#define TALITOS1_ISR_4CHDONE ISR1_FORMAT(0x5) /* 4 ch done mask */ +#define TALITOS1_ISR_TEA_ERR 0x00000040 +#define TALITOS2_ISR_4CHERR ISR2_FORMAT(0xa) /* 4 ch errors mask */ +#define TALITOS2_ISR_4CHDONE ISR2_FORMAT(0x5) /* 4 ch done mask */ +#define TALITOS2_ISR_CH_0_2_ERR ISR2_FORMAT(0x2) /* ch 0, 2 err mask */ +#define TALITOS2_ISR_CH_0_2_DONE ISR2_FORMAT(0x1) /* ch 0, 2 done mask */ +#define TALITOS2_ISR_CH_1_3_ERR ISR2_FORMAT(0x8) /* ch 1, 3 err mask */ +#define TALITOS2_ISR_CH_1_3_DONE ISR2_FORMAT(0x4) /* ch 1, 3 done mask */ #define TALITOS_ISR_LO 0x1014 #define TALITOS_ICR 0x1018 /* interrupt clear register */ #define TALITOS_ICR_LO 0x101C @@ -219,14 +231,15 @@ static inline bool has_ftr_sec1(struct talitos_private *priv) /* channel configuration register */ #define TALITOS_CCCR 0x8 -#define TALITOS_CCCR_CONT 0x2 /* channel continue */ -#define TALITOS_CCCR_RESET 0x1 /* channel reset */ +#define TALITOS2_CCCR_CONT 0x2 /* channel continue on SEC2 */ +#define TALITOS2_CCCR_RESET 0x1 /* channel reset on SEC2 */ #define TALITOS_CCCR_LO 0xc #define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */ #define TALITOS_CCCR_LO_EAE 0x20 /* extended address enable */ #define TALITOS_CCCR_LO_CDWE 0x10 /* chan. 
done writeback enab. */ #define TALITOS_CCCR_LO_NT 0x4 /* notification type */ #define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */ +#define TALITOS1_CCCR_LO_RESET 0x1 /* channel reset on SEC1 */ /* CCPSR: channel pointer status register */ #define TALITOS_CCPSR 0x10 @@ -298,6 +311,8 @@ static inline bool has_ftr_sec1(struct talitos_private *priv) #define TALITOS_EUICR_LO 0x3c #define TALITOS_EU_FIFO 0x800 /* output FIFO */ #define TALITOS_EU_FIFO_LO 0x804 /* output FIFO */ +/* DES unit */ +#define TALITOS1_DEUICR_KPE 0x00200000 /* Key Parity Error */ /* message digest unit */ #define TALITOS_MDEUICR_LO_ICE 0x4000 /* integrity check IRQ enable */ /* random number unit */ -- cgit v1.2.3 From 6f65f6ac5fb36a90ebf9a8d57cc4076b82d5009e Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:32:15 +0200 Subject: crypto: talitos - implement scatter/gather copy for SEC1 SEC1 doesn't support scatter/gather, SEC1 doesn't handle link tables. Therefore, for SEC1 we have to do it by SW. For that, we reserve space at the end of the extended descriptor, in lieu of the space reserved for the link tables on SEC2, and we perform sg_copy() when preparing the descriptors We also adapt the max buffer size which is only 32k on SEC1 while it is 64k on SEC2+ Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 138 ++++++++++++++++++++++++++++++++++------------- drivers/crypto/talitos.h | 3 +- 2 files changed, 103 insertions(+), 38 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 12654056ca53..dddf4b305ddd 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -866,9 +866,10 @@ badkey: * @dst_chained: whether dst is chained or not * @iv_dma: dma address of iv for checking continuity and link table * @dma_len: length of dma mapped link_tbl space - * @dma_link_tbl: bus physical address of link_tbl + * @dma_link_tbl: bus physical address of link_tbl/buf * @desc: h/w descriptor - * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) + * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2) + * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1) * * if decrypting (with authcheck), or either one of src_nents or dst_nents * is greater than 1, an integrity check value is concatenated to the end @@ -885,7 +886,10 @@ struct talitos_edesc { int dma_len; dma_addr_t dma_link_tbl; struct talitos_desc desc; - struct talitos_ptr link_tbl[0]; + union { + struct talitos_ptr link_tbl[0]; + u8 buf[0]; + }; }; static int talitos_map_sg(struct device *dev, struct scatterlist *sg, @@ -1282,8 +1286,11 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, dma_addr_t iv_dma = 0; gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); + int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN; - if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) { + if (cryptlen + authsize > max_len) { dev_err(dev, "length exceeds h/w max limit\n"); return ERR_PTR(-EINVAL); } @@ -1327,8 +1334,12 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, */ alloc_len = sizeof(struct talitos_edesc); if (assoc_nents || src_nents || dst_nents) { - dma_len = (src_nents + dst_nents + 2 + assoc_nents) * - sizeof(struct talitos_ptr) + authsize; + if (is_sec1) + dma_len = src_nents ? cryptlen : 0 + + dst_nents ? 
cryptlen : 0; + else + dma_len = (src_nents + dst_nents + 2 + assoc_nents) * + sizeof(struct talitos_ptr) + authsize; alloc_len += dma_len; } else { dma_len = 0; @@ -1485,7 +1496,27 @@ static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src, struct scatterlist *dst, unsigned int len, struct talitos_edesc *edesc) { - talitos_sg_unmap(dev, edesc, src, dst); + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); + + if (is_sec1) { + if (!edesc->src_nents) { + dma_unmap_sg(dev, src, 1, + dst != src ? DMA_TO_DEVICE + : DMA_BIDIRECTIONAL); + } + if (dst && edesc->dst_nents) { + dma_sync_single_for_device(dev, + edesc->dma_link_tbl + len, + len, DMA_FROM_DEVICE); + sg_copy_from_buffer(dst, edesc->dst_nents ? : 1, + edesc->buf + len, len); + } else if (dst && dst != src) { + dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE); + } + } else { + talitos_sg_unmap(dev, edesc, src, dst); + } } static void common_nonsnoop_unmap(struct device *dev, @@ -1528,25 +1559,42 @@ int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src, bool is_sec1 = has_ftr_sec1(priv); to_talitos_ptr_len(ptr, len, is_sec1); - to_talitos_ptr_extent_clear(ptr, is_sec1); - sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir, - edesc->src_chained); + if (is_sec1) { + sg_count = edesc->src_nents ? : 1; - if (sg_count == 1) { - to_talitos_ptr(ptr, sg_dma_address(src), is_sec1); - } else { - sg_count = sg_to_link_tbl(src, sg_count, len, - &edesc->link_tbl[0]); - if (sg_count > 1) { - to_talitos_ptr(ptr, edesc->dma_link_tbl, 0); - ptr->j_extent |= DESC_PTR_LNKTBL_JUMP; - dma_sync_single_for_device(dev, edesc->dma_link_tbl, - edesc->dma_len, - DMA_BIDIRECTIONAL); + if (sg_count == 1) { + dma_map_sg(dev, src, 1, dir); + to_talitos_ptr(ptr, sg_dma_address(src), is_sec1); } else { - /* Only one segment now, so no link tbl needed */ + sg_copy_to_buffer(src, sg_count, edesc->buf, len); + to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1); + dma_sync_single_for_device(dev, edesc->dma_link_tbl, + len, DMA_TO_DEVICE); + } + } else { + to_talitos_ptr_extent_clear(ptr, is_sec1); + + sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir, + edesc->src_chained); + + if (sg_count == 1) { to_talitos_ptr(ptr, sg_dma_address(src), is_sec1); + } else { + sg_count = sg_to_link_tbl(src, sg_count, len, + &edesc->link_tbl[0]); + if (sg_count > 1) { + to_talitos_ptr(ptr, edesc->dma_link_tbl, 0); + ptr->j_extent |= DESC_PTR_LNKTBL_JUMP; + dma_sync_single_for_device(dev, + edesc->dma_link_tbl, + edesc->dma_len, + DMA_BIDIRECTIONAL); + } else { + /* Only one segment now, so no link tbl needed*/ + to_talitos_ptr(ptr, sg_dma_address(src), + is_sec1); + } } } return sg_count; @@ -1560,26 +1608,42 @@ void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst, struct talitos_private *priv = dev_get_drvdata(dev); bool is_sec1 = has_ftr_sec1(priv); - to_talitos_ptr_len(ptr, len, is_sec1); - to_talitos_ptr_extent_clear(ptr, is_sec1); - if (dir != DMA_NONE) sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? 
: 1, dir, edesc->dst_chained); - if (sg_count == 1) { - to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1); + to_talitos_ptr_len(ptr, len, is_sec1); + + if (is_sec1) { + if (sg_count == 1) { + if (dir != DMA_NONE) + dma_map_sg(dev, dst, 1, dir); + to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1); + } else { + to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1); + dma_sync_single_for_device(dev, + edesc->dma_link_tbl + len, + len, DMA_FROM_DEVICE); + } } else { - struct talitos_ptr *link_tbl_ptr = - &edesc->link_tbl[edesc->src_nents + 1]; - - to_talitos_ptr(ptr, edesc->dma_link_tbl + - (edesc->src_nents + 1) * - sizeof(struct talitos_ptr), 0); - ptr->j_extent |= DESC_PTR_LNKTBL_JUMP; - sg_count = sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr); - dma_sync_single_for_device(dev, edesc->dma_link_tbl, - edesc->dma_len, DMA_BIDIRECTIONAL); + to_talitos_ptr_extent_clear(ptr, is_sec1); + + if (sg_count == 1) { + to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1); + } else { + struct talitos_ptr *link_tbl_ptr = + &edesc->link_tbl[edesc->src_nents + 1]; + + to_talitos_ptr(ptr, edesc->dma_link_tbl + + (edesc->src_nents + 1) * + sizeof(struct talitos_ptr), 0); + ptr->j_extent |= DESC_PTR_LNKTBL_JUMP; + sg_count = sg_to_link_tbl(dst, sg_count, len, + link_tbl_ptr); + dma_sync_single_for_device(dev, edesc->dma_link_tbl, + edesc->dma_len, + DMA_BIDIRECTIONAL); + } } } diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index 9507c4f620c0..314daf55e7f7 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h @@ -29,7 +29,8 @@ */ #define TALITOS_TIMEOUT 100000 -#define TALITOS_MAX_DATA_LEN 65535 +#define TALITOS1_MAX_DATA_LEN 32768 +#define TALITOS2_MAX_DATA_LEN 65535 #define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f) #define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf) -- cgit v1.2.3 From 2d02905ebd22c0271a25e424ab209c8b7067be67 Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:32:18 +0200 Subject: crypto: talitos - SEC1 bugs on 0 data hash SEC1 bugs on 0 data hash, so we submit an already padded block representing 0 data Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index dddf4b305ddd..f1406d7b0db5 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1797,6 +1797,27 @@ static void ahash_done(struct device *dev, areq->base.complete(&areq->base, err); } +/* + * SEC1 doesn't like hashing of 0 sized message, so we do the padding + * ourself and submit a padded block + */ +void talitos_handle_buggy_hash(struct talitos_ctx *ctx, + struct talitos_edesc *edesc, + struct talitos_ptr *ptr) +{ + static u8 padded_hash[64] = { + 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + }; + + pr_err_once("Bug in SEC1, padding ourself\n"); + edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD; + map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash), + (char *)padded_hash, DMA_TO_DEVICE); +} + static int common_nonsnoop_hash(struct talitos_edesc *edesc, struct ahash_request *areq, unsigned int length, void (*callback) (struct device *dev, @@ -1857,6 +1878,9 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, /* last DWORD empty */ desc->ptr[6] = zero_entry; + if (is_sec1 && 
from_talitos_ptr_len(&desc->ptr[3], true) == 0) + talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]); + ret = talitos_submit(dev, ctx->ch, desc, callback, areq); if (ret != -EINPROGRESS) { common_nonsnoop_hash_unmap(dev, edesc, areq); -- cgit v1.2.3 From 0635b7db143195281c5a29d272bb836be58af1d4 Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:32:20 +0200 Subject: crypto: talitos - Add fsl,sec1.0 compatible We add a specific compatible for SEC1, to handle the differences between SEC1 and SEC2+ Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index f1406d7b0db5..c04074d08461 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -3086,9 +3086,16 @@ err_out: } static const struct of_device_id talitos_match[] = { +#ifdef CONFIG_CRYPTO_DEV_TALITOS1 + { + .compatible = "fsl,sec1.0", + }, +#endif +#ifdef CONFIG_CRYPTO_DEV_TALITOS2 { .compatible = "fsl,sec2.0", }, +#endif {}, }; MODULE_DEVICE_TABLE(of, talitos_match); -- cgit v1.2.3 From d9b3682ffd6d7fa98195eb64307b70e9a25cc655 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 22 Apr 2015 11:56:34 +0800 Subject: crypto: caam - Remove bogus references to crypto API internals The caam driver includes algorithm types that it doesn't even use, such as struct rng_alg which has recently been moved to an internal header file and consequently broke the build of caam. This patch removes these bogus references. Reported-by: Fengguang Wu Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 29071a156cbe..3c025d407aff 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -3380,10 +3380,6 @@ struct caam_alg_template { union { struct ablkcipher_alg ablkcipher; struct aead_alg aead; - struct blkcipher_alg blkcipher; - struct cipher_alg cipher; - struct compress_alg compress; - struct rng_alg rng; } template_u; u32 class1_alg_type; u32 class2_alg_type; -- cgit v1.2.3 From c3365ce130e50176533debe1cabebcdb8e61156c Mon Sep 17 00:00:00 2001 From: Leonidas Da Silva Barbosa Date: Thu, 23 Apr 2015 17:40:30 -0300 Subject: crypto: nx - Fixing NX data alignment with nx_sg list In NX we need to pass always a 16 multiple size nx_sg_list to co processor. Trim function handle with this assuring all nx_sg_lists are 16 multiple size, although data was not being considerated when crop was done. It was causing an unalignment between size of the list and data, corrupting csbcpb fields returning a -23 H_ST_PARM error, or invalid operation. This patch fix this recalculating how much data should be put back in to_process variable what assures the size of sg_list will be correct with size of the data. Signed-off-by: Leonidas S. Barbosa Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx.c | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index 1da6dc59d0dd..4856f7287eae 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c @@ -215,8 +215,15 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst, * @delta: is the amount we need to crop in order to bound the list. 
* */ -static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int delta) +static long int trim_sg_list(struct nx_sg *sg, + struct nx_sg *end, + unsigned int delta, + unsigned int *nbytes) { + long int oplen; + long int data_back; + unsigned int is_delta = delta; + while (delta && end > sg) { struct nx_sg *last = end - 1; @@ -228,7 +235,20 @@ static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int d delta -= last->len; } } - return (sg - end) * sizeof(struct nx_sg); + + /* There are cases where we need to crop list in order to make it + * a block size multiple, but we also need to align data. In order to + * that we need to calculate how much we need to put back to be + * processed + */ + oplen = (sg - end) * sizeof(struct nx_sg); + if (is_delta) { + data_back = (abs(oplen) / AES_BLOCK_SIZE) * sg->len; + data_back = *nbytes - (data_back & ~(AES_BLOCK_SIZE - 1)); + *nbytes -= data_back; + } + + return oplen; } /** @@ -330,8 +350,8 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx, /* these lengths should be negative, which will indicate to phyp that * the input and output parameters are scatterlists, not linear * buffers */ - nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta); - nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta); + nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta, nbytes); + nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta, nbytes); return 0; } -- cgit v1.2.3 From 10d87b730e1d9f1442cae6487bb3aef8632bed23 Mon Sep 17 00:00:00 2001 From: Leonidas Da Silva Barbosa Date: Thu, 23 Apr 2015 17:41:43 -0300 Subject: crypto: nx - Fixing SHA update bug Bug happens when a data size less than SHA block size is passed. Since first attempt will be saved in buffer, second round attempt get into two step to calculate op.inlen and op.outlen. The issue resides in this step. A wrong value of op.inlen and outlen was being calculated. This patch fix this eliminate the nx_sha_build_sg_list, that is useless in SHA's algorithm context. Instead we call nx_build_sg_list directly and pass a previous calculated max_sg_len to it. Signed-off-by: Leonidas S. 
Barbosa Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-sha256.c | 84 ++++++++++++++++++++++++------------------ drivers/crypto/nx/nx-sha512.c | 85 +++++++++++++++++++++++++------------------ drivers/crypto/nx/nx.c | 47 ------------------------ drivers/crypto/nx/nx.h | 2 - 4 files changed, 99 insertions(+), 119 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c index 23621da624c3..4e91bdb83c59 100644 --- a/drivers/crypto/nx/nx-sha256.c +++ b/drivers/crypto/nx/nx-sha256.c @@ -33,8 +33,9 @@ static int nx_sha256_init(struct shash_desc *desc) { struct sha256_state *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_sg *out_sg; int len; - int rc; + u32 max_sg_len; nx_ctx_init(nx_ctx, HCOP_FC_SHA); @@ -44,15 +45,18 @@ static int nx_sha256_init(struct shash_desc *desc) NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256); + max_sg_len = min_t(u64, nx_ctx->ap->sglen, + nx_driver.of.max_sg_len/sizeof(struct nx_sg)); + max_sg_len = min_t(u64, max_sg_len, + nx_ctx->ap->databytelen/NX_PAGE_SIZE); + len = SHA256_DIGEST_SIZE; - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, - &nx_ctx->op.outlen, - &len, - (u8 *) sctx->state, - NX_DS_SHA256); + out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, + &len, max_sg_len); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); - if (rc) - goto out; + if (len != SHA256_DIGEST_SIZE) + return -EINVAL; sctx->state[0] = __cpu_to_be32(SHA256_H0); sctx->state[1] = __cpu_to_be32(SHA256_H1); @@ -64,7 +68,6 @@ static int nx_sha256_init(struct shash_desc *desc) sctx->state[7] = __cpu_to_be32(SHA256_H7); sctx->count = 0; -out: return 0; } @@ -74,10 +77,12 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, struct sha256_state *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + struct nx_sg *in_sg; u64 to_process = 0, leftover, total; unsigned long irq_flags; int rc = 0; int data_len; + u32 max_sg_len; u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE); spin_lock_irqsave(&nx_ctx->lock, irq_flags); @@ -97,6 +102,12 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; + in_sg = nx_ctx->in_sg; + max_sg_len = min_t(u64, nx_ctx->ap->sglen, + nx_driver.of.max_sg_len/sizeof(struct nx_sg)); + max_sg_len = min_t(u64, max_sg_len, + nx_ctx->ap->databytelen/NX_PAGE_SIZE); + do { /* * to_process: the SHA256_BLOCK_SIZE data chunk to process in @@ -108,25 +119,22 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, if (buf_len) { data_len = buf_len; - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, - &nx_ctx->op.inlen, - &data_len, - (u8 *) sctx->buf, - NX_DS_SHA256); + in_sg = nx_build_sg_list(nx_ctx->in_sg, + (u8 *) sctx->buf, + &data_len, + max_sg_len); - if (rc || data_len != buf_len) + if (data_len != buf_len) { + rc = -EINVAL; goto out; + } } data_len = to_process - buf_len; - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, - &nx_ctx->op.inlen, - &data_len, - (u8 *) data, - NX_DS_SHA256); + in_sg = nx_build_sg_list(in_sg, (u8 *) data, + &data_len, max_sg_len); - if (rc) - goto out; + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); to_process = (data_len + buf_len); leftover = total - to_process; @@ -173,12 +181,19 @@ static int nx_sha256_final(struct shash_desc *desc, u8 
*out) struct sha256_state *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + struct nx_sg *in_sg, *out_sg; unsigned long irq_flags; - int rc; + u32 max_sg_len; + int rc = 0; int len; spin_lock_irqsave(&nx_ctx->lock, irq_flags); + max_sg_len = min_t(u64, nx_ctx->ap->sglen, + nx_driver.of.max_sg_len/sizeof(struct nx_sg)); + max_sg_len = min_t(u64, max_sg_len, + nx_ctx->ap->databytelen/NX_PAGE_SIZE); + /* final is represented by continuing the operation and indicating that * this is not an intermediate operation */ if (sctx->count >= SHA256_BLOCK_SIZE) { @@ -195,25 +210,24 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out) csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8); len = sctx->count & (SHA256_BLOCK_SIZE - 1); - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, - &nx_ctx->op.inlen, - &len, - (u8 *) sctx->buf, - NX_DS_SHA256); + in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf, + &len, max_sg_len); - if (rc || len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) + if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) { + rc = -EINVAL; goto out; + } len = SHA256_DIGEST_SIZE; - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, - &nx_ctx->op.outlen, - &len, - out, - NX_DS_SHA256); + out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len); - if (rc || len != SHA256_DIGEST_SIZE) + if (len != SHA256_DIGEST_SIZE) { + rc = -EINVAL; goto out; + } + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); if (!nx_ctx->op.outlen) { rc = -EINVAL; goto out; diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c index b3adf1022673..e6a58d2ee628 100644 --- a/drivers/crypto/nx/nx-sha512.c +++ b/drivers/crypto/nx/nx-sha512.c @@ -32,8 +32,9 @@ static int nx_sha512_init(struct shash_desc *desc) { struct sha512_state *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_sg *out_sg; int len; - int rc; + u32 max_sg_len; nx_ctx_init(nx_ctx, HCOP_FC_SHA); @@ -43,15 +44,18 @@ static int nx_sha512_init(struct shash_desc *desc) NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512); + max_sg_len = min_t(u64, nx_ctx->ap->sglen, + nx_driver.of.max_sg_len/sizeof(struct nx_sg)); + max_sg_len = min_t(u64, max_sg_len, + nx_ctx->ap->databytelen/NX_PAGE_SIZE); + len = SHA512_DIGEST_SIZE; - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, - &nx_ctx->op.outlen, - &len, - (u8 *)sctx->state, - NX_DS_SHA512); + out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, + &len, max_sg_len); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); - if (rc || len != SHA512_DIGEST_SIZE) - goto out; + if (len != SHA512_DIGEST_SIZE) + return -EINVAL; sctx->state[0] = __cpu_to_be64(SHA512_H0); sctx->state[1] = __cpu_to_be64(SHA512_H1); @@ -63,7 +67,6 @@ static int nx_sha512_init(struct shash_desc *desc) sctx->state[7] = __cpu_to_be64(SHA512_H7); sctx->count[0] = 0; -out: return 0; } @@ -73,10 +76,12 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, struct sha512_state *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + struct nx_sg *in_sg; u64 to_process, leftover = 0, total; unsigned long irq_flags; int rc = 0; int data_len; + u32 max_sg_len; u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE); 
spin_lock_irqsave(&nx_ctx->lock, irq_flags); @@ -96,6 +101,12 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; + in_sg = nx_ctx->in_sg; + max_sg_len = min_t(u64, nx_ctx->ap->sglen, + nx_driver.of.max_sg_len/sizeof(struct nx_sg)); + max_sg_len = min_t(u64, max_sg_len, + nx_ctx->ap->databytelen/NX_PAGE_SIZE); + do { /* * to_process: the SHA512_BLOCK_SIZE data chunk to process in @@ -108,25 +119,26 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, if (buf_len) { data_len = buf_len; - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, - &nx_ctx->op.inlen, - &data_len, - (u8 *) sctx->buf, - NX_DS_SHA512); + in_sg = nx_build_sg_list(nx_ctx->in_sg, + (u8 *) sctx->buf, + &data_len, max_sg_len); - if (rc || data_len != buf_len) + if (data_len != buf_len) { + rc = -EINVAL; goto out; + } } data_len = to_process - buf_len; - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, - &nx_ctx->op.inlen, - &data_len, - (u8 *) data, - NX_DS_SHA512); + in_sg = nx_build_sg_list(in_sg, (u8 *) data, + &data_len, max_sg_len); - if (rc || data_len != (to_process - buf_len)) + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); + + if (data_len != (to_process - buf_len)) { + rc = -EINVAL; goto out; + } to_process = (data_len + buf_len); leftover = total - to_process; @@ -172,13 +184,20 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out) struct sha512_state *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + struct nx_sg *in_sg, *out_sg; + u32 max_sg_len; u64 count0; unsigned long irq_flags; - int rc; + int rc = 0; int len; spin_lock_irqsave(&nx_ctx->lock, irq_flags); + max_sg_len = min_t(u64, nx_ctx->ap->sglen, + nx_driver.of.max_sg_len/sizeof(struct nx_sg)); + max_sg_len = min_t(u64, max_sg_len, + nx_ctx->ap->databytelen/NX_PAGE_SIZE); + /* final is represented by continuing the operation and indicating that * this is not an intermediate operation */ if (sctx->count[0] >= SHA512_BLOCK_SIZE) { @@ -200,24 +219,20 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out) csbcpb->cpb.sha512.message_bit_length_lo = count0; len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1); - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, - &nx_ctx->op.inlen, - &len, - (u8 *)sctx->buf, - NX_DS_SHA512); + in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len, + max_sg_len); - if (rc || len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) + if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) { + rc = -EINVAL; goto out; + } len = SHA512_DIGEST_SIZE; - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, - &nx_ctx->op.outlen, - &len, - out, - NX_DS_SHA512); + out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, + max_sg_len); - if (rc) - goto out; + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); if (!nx_ctx->op.outlen) { rc = -EINVAL; diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index 4856f7287eae..2e2529ce8d31 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c @@ -251,53 +251,6 @@ static long int trim_sg_list(struct nx_sg *sg, return oplen; } -/** - * nx_sha_build_sg_list - walk and build sg list to sha modes - * using right bounds and limits. 
- * @nx_ctx: NX crypto context for the lists we're building - * @nx_sg: current sg list in or out list - * @op_len: current op_len to be used in order to build a sg list - * @nbytes: number or bytes to be processed - * @offset: buf offset - * @mode: SHA256 or SHA512 - */ -int nx_sha_build_sg_list(struct nx_crypto_ctx *nx_ctx, - struct nx_sg *nx_in_outsg, - s64 *op_len, - unsigned int *nbytes, - u8 *offset, - u32 mode) -{ - unsigned int delta = 0; - unsigned int total = *nbytes; - struct nx_sg *nx_insg = nx_in_outsg; - unsigned int max_sg_len; - - max_sg_len = min_t(u64, nx_ctx->ap->sglen, - nx_driver.of.max_sg_len/sizeof(struct nx_sg)); - max_sg_len = min_t(u64, max_sg_len, - nx_ctx->ap->databytelen/NX_PAGE_SIZE); - - *nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen); - nx_insg = nx_build_sg_list(nx_insg, offset, nbytes, max_sg_len); - - switch (mode) { - case NX_DS_SHA256: - if (*nbytes < total) - delta = *nbytes - (*nbytes & ~(SHA256_BLOCK_SIZE - 1)); - break; - case NX_DS_SHA512: - if (*nbytes < total) - delta = *nbytes - (*nbytes & ~(SHA512_BLOCK_SIZE - 1)); - break; - default: - return -EINVAL; - } - *op_len = trim_sg_list(nx_in_outsg, nx_insg, delta); - - return 0; -} - /** * nx_build_sg_lists - walk the input scatterlists and build arrays of NX * scatterlists based on them. diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h index 6c9ecaaead52..41b87ee03fe2 100644 --- a/drivers/crypto/nx/nx.h +++ b/drivers/crypto/nx/nx.h @@ -153,8 +153,6 @@ void nx_crypto_ctx_exit(struct crypto_tfm *tfm); void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function); int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op, u32 may_sleep); -int nx_sha_build_sg_list(struct nx_crypto_ctx *, struct nx_sg *, - s64 *, unsigned int *, u8 *, u32); struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32); int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *, struct scatterlist *, struct scatterlist *, unsigned int *, -- cgit v1.2.3 From dcd93e8303cbd0ef3dba165daa0633ebd05b3e17 Mon Sep 17 00:00:00 2001 From: "Allan, Bruce W" Date: Mon, 27 Apr 2015 13:58:27 -0700 Subject: crypto: qat - do not duplicate string containing firmware name Use ADF_DH895XCC_FW instead of duplicating the string "qat_895xcc.bin" when referring to the DH895xCC firmware. Signed-off-by: Bruce Allan Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_dh895xcc/adf_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c index 9decea2779c6..c1b3f0b342e2 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c @@ -417,5 +417,5 @@ module_exit(adfdrv_release); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Intel"); -MODULE_FIRMWARE("qat_895xcc.bin"); +MODULE_FIRMWARE(ADF_DH895XCC_FW); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); -- cgit v1.2.3 From fdd05e4b9ae22603ed09beb4e179ae7746555a81 Mon Sep 17 00:00:00 2001 From: Dan Streetman Date: Thu, 7 May 2015 13:49:16 -0400 Subject: crypto: nx - rename nx-842.c to nx-842-pseries.c Move the entire NX-842 driver for the pSeries platform from the file nx-842.c to nx-842-pseries.c. This is required by later patches that add NX-842 support for the PowerNV platform. This patch does not alter the content of the pSeries NX-842 driver at all, it only changes the filename. 
Signed-off-by: Dan Streetman Signed-off-by: Herbert Xu --- drivers/crypto/nx/Makefile | 2 +- drivers/crypto/nx/nx-842-pseries.c | 1603 ++++++++++++++++++++++++++++++++++++ drivers/crypto/nx/nx-842.c | 1603 ------------------------------------ 3 files changed, 1604 insertions(+), 1604 deletions(-) create mode 100644 drivers/crypto/nx/nx-842-pseries.c delete mode 100644 drivers/crypto/nx/nx-842.c (limited to 'drivers') diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile index bb770ea45ce9..8669ffa8dc5c 100644 --- a/drivers/crypto/nx/Makefile +++ b/drivers/crypto/nx/Makefile @@ -11,4 +11,4 @@ nx-crypto-objs := nx.o \ nx-sha512.o obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS) += nx-compress.o -nx-compress-objs := nx-842.o +nx-compress-objs := nx-842-pseries.o diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c new file mode 100644 index 000000000000..887196e9b50c --- /dev/null +++ b/drivers/crypto/nx/nx-842-pseries.c @@ -0,0 +1,1603 @@ +/* + * Driver for IBM Power 842 compression accelerator + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Copyright (C) IBM Corporation, 2012 + * + * Authors: Robert Jennings + * Seth Jennings + */ + +#include +#include +#include +#include +#include + +#include +#include + +#include "nx_csbcpb.h" /* struct nx_csbcpb */ + +#define MODULE_NAME "nx-compress" +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Robert Jennings "); +MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors"); + +#define SHIFT_4K 12 +#define SHIFT_64K 16 +#define SIZE_4K (1UL << SHIFT_4K) +#define SIZE_64K (1UL << SHIFT_64K) + +/* IO buffer must be 128 byte aligned */ +#define IO_BUFFER_ALIGN 128 + +struct nx842_header { + int blocks_nr; /* number of compressed blocks */ + int offset; /* offset of the first block (from beginning of header) */ + int sizes[0]; /* size of compressed blocks */ +}; + +static inline int nx842_header_size(const struct nx842_header *hdr) +{ + return sizeof(struct nx842_header) + + hdr->blocks_nr * sizeof(hdr->sizes[0]); +} + +/* Macros for fields within nx_csbcpb */ +/* Check the valid bit within the csbcpb valid field */ +#define NX842_CSBCBP_VALID_CHK(x) (x & BIT_MASK(7)) + +/* CE macros operate on the completion_extension field bits in the csbcpb. 
+ * CE0 0=full completion, 1=partial completion + * CE1 0=CE0 indicates completion, 1=termination (output may be modified) + * CE2 0=processed_bytes is source bytes, 1=processed_bytes is target bytes */ +#define NX842_CSBCPB_CE0(x) (x & BIT_MASK(7)) +#define NX842_CSBCPB_CE1(x) (x & BIT_MASK(6)) +#define NX842_CSBCPB_CE2(x) (x & BIT_MASK(5)) + +/* The NX unit accepts data only on 4K page boundaries */ +#define NX842_HW_PAGE_SHIFT SHIFT_4K +#define NX842_HW_PAGE_SIZE (ASM_CONST(1) << NX842_HW_PAGE_SHIFT) +#define NX842_HW_PAGE_MASK (~(NX842_HW_PAGE_SIZE-1)) + +enum nx842_status { + UNAVAILABLE, + AVAILABLE +}; + +struct ibm_nx842_counters { + atomic64_t comp_complete; + atomic64_t comp_failed; + atomic64_t decomp_complete; + atomic64_t decomp_failed; + atomic64_t swdecomp; + atomic64_t comp_times[32]; + atomic64_t decomp_times[32]; +}; + +static struct nx842_devdata { + struct vio_dev *vdev; + struct device *dev; + struct ibm_nx842_counters *counters; + unsigned int max_sg_len; + unsigned int max_sync_size; + unsigned int max_sync_sg; + enum nx842_status status; +} __rcu *devdata; +static DEFINE_SPINLOCK(devdata_mutex); + +#define NX842_COUNTER_INC(_x) \ +static inline void nx842_inc_##_x( \ + const struct nx842_devdata *dev) { \ + if (dev) \ + atomic64_inc(&dev->counters->_x); \ +} +NX842_COUNTER_INC(comp_complete); +NX842_COUNTER_INC(comp_failed); +NX842_COUNTER_INC(decomp_complete); +NX842_COUNTER_INC(decomp_failed); +NX842_COUNTER_INC(swdecomp); + +#define NX842_HIST_SLOTS 16 + +static void ibm_nx842_incr_hist(atomic64_t *times, unsigned int time) +{ + int bucket = fls(time); + + if (bucket) + bucket = min((NX842_HIST_SLOTS - 1), bucket - 1); + + atomic64_inc(×[bucket]); +} + +/* NX unit operation flags */ +#define NX842_OP_COMPRESS 0x0 +#define NX842_OP_CRC 0x1 +#define NX842_OP_DECOMPRESS 0x2 +#define NX842_OP_COMPRESS_CRC (NX842_OP_COMPRESS | NX842_OP_CRC) +#define NX842_OP_DECOMPRESS_CRC (NX842_OP_DECOMPRESS | NX842_OP_CRC) +#define NX842_OP_ASYNC (1<<23) +#define NX842_OP_NOTIFY (1<<22) +#define NX842_OP_NOTIFY_INT(x) ((x & 0xff)<<8) + +static unsigned long nx842_get_desired_dma(struct vio_dev *viodev) +{ + /* No use of DMA mappings within the driver. 
*/ + return 0; +} + +struct nx842_slentry { + unsigned long ptr; /* Real address (use __pa()) */ + unsigned long len; +}; + +/* pHyp scatterlist entry */ +struct nx842_scatterlist { + int entry_nr; /* number of slentries */ + struct nx842_slentry *entries; /* ptr to array of slentries */ +}; + +/* Does not include sizeof(entry_nr) in the size */ +static inline unsigned long nx842_get_scatterlist_size( + struct nx842_scatterlist *sl) +{ + return sl->entry_nr * sizeof(struct nx842_slentry); +} + +static inline unsigned long nx842_get_pa(void *addr) +{ + if (is_vmalloc_addr(addr)) + return page_to_phys(vmalloc_to_page(addr)) + + offset_in_page(addr); + else + return __pa(addr); +} + +static int nx842_build_scatterlist(unsigned long buf, int len, + struct nx842_scatterlist *sl) +{ + unsigned long nextpage; + struct nx842_slentry *entry; + + sl->entry_nr = 0; + + entry = sl->entries; + while (len) { + entry->ptr = nx842_get_pa((void *)buf); + nextpage = ALIGN(buf + 1, NX842_HW_PAGE_SIZE); + if (nextpage < buf + len) { + /* we aren't at the end yet */ + if (IS_ALIGNED(buf, NX842_HW_PAGE_SIZE)) + /* we are in the middle (or beginning) */ + entry->len = NX842_HW_PAGE_SIZE; + else + /* we are at the beginning */ + entry->len = nextpage - buf; + } else { + /* at the end */ + entry->len = len; + } + + len -= entry->len; + buf += entry->len; + sl->entry_nr++; + entry++; + } + + return 0; +} + +/* + * Working memory for software decompression + */ +struct sw842_fifo { + union { + char f8[256][8]; + char f4[512][4]; + }; + char f2[256][2]; + unsigned char f84_full; + unsigned char f2_full; + unsigned char f8_count; + unsigned char f2_count; + unsigned int f4_count; +}; + +/* + * Working memory for crypto API + */ +struct nx842_workmem { + char bounce[PAGE_SIZE]; /* bounce buffer for decompression input */ + union { + /* hardware working memory */ + struct { + /* scatterlist */ + char slin[SIZE_4K]; + char slout[SIZE_4K]; + /* coprocessor status/parameter block */ + struct nx_csbcpb csbcpb; + }; + /* software working memory */ + struct sw842_fifo swfifo; /* software decompression fifo */ + }; +}; + +int nx842_get_workmem_size(void) +{ + return sizeof(struct nx842_workmem) + NX842_HW_PAGE_SIZE; +} +EXPORT_SYMBOL_GPL(nx842_get_workmem_size); + +int nx842_get_workmem_size_aligned(void) +{ + return sizeof(struct nx842_workmem); +} +EXPORT_SYMBOL_GPL(nx842_get_workmem_size_aligned); + +static int nx842_validate_result(struct device *dev, + struct cop_status_block *csb) +{ + /* The csb must be valid after returning from vio_h_cop_sync */ + if (!NX842_CSBCBP_VALID_CHK(csb->valid)) { + dev_err(dev, "%s: cspcbp not valid upon completion.\n", + __func__); + dev_dbg(dev, "valid:0x%02x cs:0x%02x cc:0x%02x ce:0x%02x\n", + csb->valid, + csb->crb_seq_number, + csb->completion_code, + csb->completion_extension); + dev_dbg(dev, "processed_bytes:%d address:0x%016lx\n", + csb->processed_byte_count, + (unsigned long)csb->address); + return -EIO; + } + + /* Check return values from the hardware in the CSB */ + switch (csb->completion_code) { + case 0: /* Completed without error */ + break; + case 64: /* Target bytes > Source bytes during compression */ + case 13: /* Output buffer too small */ + dev_dbg(dev, "%s: Compression output larger than input\n", + __func__); + return -ENOSPC; + case 66: /* Input data contains an illegal template field */ + case 67: /* Template indicates data past the end of the input stream */ + dev_dbg(dev, "%s: Bad data for decompression (code:%d)\n", + __func__, csb->completion_code); + return 
-EINVAL; + default: + dev_dbg(dev, "%s: Unspecified error (code:%d)\n", + __func__, csb->completion_code); + return -EIO; + } + + /* Hardware sanity check */ + if (!NX842_CSBCPB_CE2(csb->completion_extension)) { + dev_err(dev, "%s: No error returned by hardware, but " + "data returned is unusable, contact support.\n" + "(Additional info: csbcbp->processed bytes " + "does not specify processed bytes for the " + "target buffer.)\n", __func__); + return -EIO; + } + + return 0; +} + +/** + * nx842_compress - Compress data using the 842 algorithm + * + * Compression provide by the NX842 coprocessor on IBM Power systems. + * The input buffer is compressed and the result is stored in the + * provided output buffer. + * + * Upon return from this function @outlen contains the length of the + * compressed data. If there is an error then @outlen will be 0 and an + * error will be specified by the return code from this function. + * + * @in: Pointer to input buffer, must be page aligned + * @inlen: Length of input buffer, must be PAGE_SIZE + * @out: Pointer to output buffer + * @outlen: Length of output buffer + * @wrkmem: ptr to buffer for working memory, size determined by + * nx842_get_workmem_size() + * + * Returns: + * 0 Success, output of length @outlen stored in the buffer at @out + * -ENOMEM Unable to allocate internal buffers + * -ENOSPC Output buffer is to small + * -EMSGSIZE XXX Difficult to describe this limitation + * -EIO Internal error + * -ENODEV Hardware unavailable + */ +int nx842_compress(const unsigned char *in, unsigned int inlen, + unsigned char *out, unsigned int *outlen, void *wmem) +{ + struct nx842_header *hdr; + struct nx842_devdata *local_devdata; + struct device *dev = NULL; + struct nx842_workmem *workmem; + struct nx842_scatterlist slin, slout; + struct nx_csbcpb *csbcpb; + int ret = 0, max_sync_size, i, bytesleft, size, hdrsize; + unsigned long inbuf, outbuf, padding; + struct vio_pfo_op op = { + .done = NULL, + .handle = 0, + .timeout = 0, + }; + unsigned long start_time = get_tb(); + + /* + * Make sure input buffer is 64k page aligned. This is assumed since + * this driver is designed for page compression only (for now). This + * is very nice since we can now use direct DDE(s) for the input and + * the alignment is guaranteed. + */ + inbuf = (unsigned long)in; + if (!IS_ALIGNED(inbuf, PAGE_SIZE) || inlen != PAGE_SIZE) + return -EINVAL; + + rcu_read_lock(); + local_devdata = rcu_dereference(devdata); + if (!local_devdata || !local_devdata->dev) { + rcu_read_unlock(); + return -ENODEV; + } + max_sync_size = local_devdata->max_sync_size; + dev = local_devdata->dev; + + /* Create the header */ + hdr = (struct nx842_header *)out; + hdr->blocks_nr = PAGE_SIZE / max_sync_size; + hdrsize = nx842_header_size(hdr); + outbuf = (unsigned long)out + hdrsize; + bytesleft = *outlen - hdrsize; + + /* Init scatterlist */ + workmem = (struct nx842_workmem *)ALIGN((unsigned long)wmem, + NX842_HW_PAGE_SIZE); + slin.entries = (struct nx842_slentry *)workmem->slin; + slout.entries = (struct nx842_slentry *)workmem->slout; + + /* Init operation */ + op.flags = NX842_OP_COMPRESS; + csbcpb = &workmem->csbcpb; + memset(csbcpb, 0, sizeof(*csbcpb)); + op.csbcpb = nx842_get_pa(csbcpb); + op.out = nx842_get_pa(slout.entries); + + for (i = 0; i < hdr->blocks_nr; i++) { + /* + * Aligning the output blocks to 128 bytes does waste space, + * but it prevents the need for bounce buffers and memory + * copies. It also simplifies the code a lot. 
In the worst + * case (64k page, 4k max_sync_size), you lose up to + * (128*16)/64k = ~3% the compression factor. For 64k + * max_sync_size, the loss would be at most 128/64k = ~0.2%. + */ + padding = ALIGN(outbuf, IO_BUFFER_ALIGN) - outbuf; + outbuf += padding; + bytesleft -= padding; + if (i == 0) + /* save offset into first block in header */ + hdr->offset = padding + hdrsize; + + if (bytesleft <= 0) { + ret = -ENOSPC; + goto unlock; + } + + /* + * NOTE: If the default max_sync_size is changed from 4k + * to 64k, remove the "likely" case below, since a + * scatterlist will always be needed. + */ + if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) { + /* Create direct DDE */ + op.in = nx842_get_pa((void *)inbuf); + op.inlen = max_sync_size; + + } else { + /* Create indirect DDE (scatterlist) */ + nx842_build_scatterlist(inbuf, max_sync_size, &slin); + op.in = nx842_get_pa(slin.entries); + op.inlen = -nx842_get_scatterlist_size(&slin); + } + + /* + * If max_sync_size != NX842_HW_PAGE_SIZE, an indirect + * DDE is required for the outbuf. + * If max_sync_size == NX842_HW_PAGE_SIZE, outbuf must + * also be page aligned (1 in 128/4k=32 chance) in order + * to use a direct DDE. + * This is unlikely, just use an indirect DDE always. + */ + nx842_build_scatterlist(outbuf, + min(bytesleft, max_sync_size), &slout); + /* op.out set before loop */ + op.outlen = -nx842_get_scatterlist_size(&slout); + + /* Send request to pHyp */ + ret = vio_h_cop_sync(local_devdata->vdev, &op); + + /* Check for pHyp error */ + if (ret) { + dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n", + __func__, ret, op.hcall_err); + ret = -EIO; + goto unlock; + } + + /* Check for hardware error */ + ret = nx842_validate_result(dev, &csbcpb->csb); + if (ret && ret != -ENOSPC) + goto unlock; + + /* Handle incompressible data */ + if (unlikely(ret == -ENOSPC)) { + if (bytesleft < max_sync_size) { + /* + * Not enough space left in the output buffer + * to store uncompressed block + */ + goto unlock; + } else { + /* Store incompressible block */ + memcpy((void *)outbuf, (void *)inbuf, + max_sync_size); + hdr->sizes[i] = -max_sync_size; + outbuf += max_sync_size; + bytesleft -= max_sync_size; + /* Reset ret, incompressible data handled */ + ret = 0; + } + } else { + /* Normal case, compression was successful */ + size = csbcpb->csb.processed_byte_count; + dev_dbg(dev, "%s: processed_bytes=%d\n", + __func__, size); + hdr->sizes[i] = size; + outbuf += size; + bytesleft -= size; + } + + inbuf += max_sync_size; + } + + *outlen = (unsigned int)(outbuf - (unsigned long)out); + +unlock: + if (ret) + nx842_inc_comp_failed(local_devdata); + else { + nx842_inc_comp_complete(local_devdata); + ibm_nx842_incr_hist(local_devdata->counters->comp_times, + (get_tb() - start_time) / tb_ticks_per_usec); + } + rcu_read_unlock(); + return ret; +} +EXPORT_SYMBOL_GPL(nx842_compress); + +static int sw842_decompress(const unsigned char *, int, unsigned char *, int *, + const void *); + +/** + * nx842_decompress - Decompress data using the 842 algorithm + * + * Decompression provide by the NX842 coprocessor on IBM Power systems. + * The input buffer is decompressed and the result is stored in the + * provided output buffer. The size allocated to the output buffer is + * provided by the caller of this function in @outlen. Upon return from + * this function @outlen contains the length of the decompressed data. + * If there is an error then @outlen will be 0 and an error will be + * specified by the return code from this function. 
+ * + * @in: Pointer to input buffer, will use bounce buffer if not 128 byte + * aligned + * @inlen: Length of input buffer + * @out: Pointer to output buffer, must be page aligned + * @outlen: Length of output buffer, must be PAGE_SIZE + * @wrkmem: ptr to buffer for working memory, size determined by + * nx842_get_workmem_size() + * + * Returns: + * 0 Success, output of length @outlen stored in the buffer at @out + * -ENODEV Hardware decompression device is unavailable + * -ENOMEM Unable to allocate internal buffers + * -ENOSPC Output buffer is to small + * -EINVAL Bad input data encountered when attempting decompress + * -EIO Internal error + */ +int nx842_decompress(const unsigned char *in, unsigned int inlen, + unsigned char *out, unsigned int *outlen, void *wmem) +{ + struct nx842_header *hdr; + struct nx842_devdata *local_devdata; + struct device *dev = NULL; + struct nx842_workmem *workmem; + struct nx842_scatterlist slin, slout; + struct nx_csbcpb *csbcpb; + int ret = 0, i, size, max_sync_size; + unsigned long inbuf, outbuf; + struct vio_pfo_op op = { + .done = NULL, + .handle = 0, + .timeout = 0, + }; + unsigned long start_time = get_tb(); + + /* Ensure page alignment and size */ + outbuf = (unsigned long)out; + if (!IS_ALIGNED(outbuf, PAGE_SIZE) || *outlen != PAGE_SIZE) + return -EINVAL; + + rcu_read_lock(); + local_devdata = rcu_dereference(devdata); + if (local_devdata) + dev = local_devdata->dev; + + /* Get header */ + hdr = (struct nx842_header *)in; + + workmem = (struct nx842_workmem *)ALIGN((unsigned long)wmem, + NX842_HW_PAGE_SIZE); + + inbuf = (unsigned long)in + hdr->offset; + if (likely(!IS_ALIGNED(inbuf, IO_BUFFER_ALIGN))) { + /* Copy block(s) into bounce buffer for alignment */ + memcpy(workmem->bounce, in + hdr->offset, inlen - hdr->offset); + inbuf = (unsigned long)workmem->bounce; + } + + /* Init scatterlist */ + slin.entries = (struct nx842_slentry *)workmem->slin; + slout.entries = (struct nx842_slentry *)workmem->slout; + + /* Init operation */ + op.flags = NX842_OP_DECOMPRESS; + csbcpb = &workmem->csbcpb; + memset(csbcpb, 0, sizeof(*csbcpb)); + op.csbcpb = nx842_get_pa(csbcpb); + + /* + * max_sync_size may have changed since compression, + * so we can't read it from the device info. We need + * to derive it from hdr->blocks_nr. + */ + max_sync_size = PAGE_SIZE / hdr->blocks_nr; + + for (i = 0; i < hdr->blocks_nr; i++) { + /* Skip padding */ + inbuf = ALIGN(inbuf, IO_BUFFER_ALIGN); + + if (hdr->sizes[i] < 0) { + /* Negative sizes indicate uncompressed data blocks */ + size = abs(hdr->sizes[i]); + memcpy((void *)outbuf, (void *)inbuf, size); + outbuf += size; + inbuf += size; + continue; + } + + if (!dev) + goto sw; + + /* + * The better the compression, the more likely the "likely" + * case becomes. + */ + if (likely((inbuf & NX842_HW_PAGE_MASK) == + ((inbuf + hdr->sizes[i] - 1) & NX842_HW_PAGE_MASK))) { + /* Create direct DDE */ + op.in = nx842_get_pa((void *)inbuf); + op.inlen = hdr->sizes[i]; + } else { + /* Create indirect DDE (scatterlist) */ + nx842_build_scatterlist(inbuf, hdr->sizes[i] , &slin); + op.in = nx842_get_pa(slin.entries); + op.inlen = -nx842_get_scatterlist_size(&slin); + } + + /* + * NOTE: If the default max_sync_size is changed from 4k + * to 64k, remove the "likely" case below, since a + * scatterlist will always be needed. 
+ */ + if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) { + /* Create direct DDE */ + op.out = nx842_get_pa((void *)outbuf); + op.outlen = max_sync_size; + } else { + /* Create indirect DDE (scatterlist) */ + nx842_build_scatterlist(outbuf, max_sync_size, &slout); + op.out = nx842_get_pa(slout.entries); + op.outlen = -nx842_get_scatterlist_size(&slout); + } + + /* Send request to pHyp */ + ret = vio_h_cop_sync(local_devdata->vdev, &op); + + /* Check for pHyp error */ + if (ret) { + dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n", + __func__, ret, op.hcall_err); + dev = NULL; + goto sw; + } + + /* Check for hardware error */ + ret = nx842_validate_result(dev, &csbcpb->csb); + if (ret) { + dev = NULL; + goto sw; + } + + /* HW decompression success */ + inbuf += hdr->sizes[i]; + outbuf += csbcpb->csb.processed_byte_count; + continue; + +sw: + /* software decompression */ + size = max_sync_size; + ret = sw842_decompress( + (unsigned char *)inbuf, hdr->sizes[i], + (unsigned char *)outbuf, &size, wmem); + if (ret) + pr_debug("%s: sw842_decompress failed with %d\n", + __func__, ret); + + if (ret) { + if (ret != -ENOSPC && ret != -EINVAL && + ret != -EMSGSIZE) + ret = -EIO; + goto unlock; + } + + /* SW decompression success */ + inbuf += hdr->sizes[i]; + outbuf += size; + } + + *outlen = (unsigned int)(outbuf - (unsigned long)out); + +unlock: + if (ret) + /* decompress fail */ + nx842_inc_decomp_failed(local_devdata); + else { + if (!dev) + /* software decompress */ + nx842_inc_swdecomp(local_devdata); + nx842_inc_decomp_complete(local_devdata); + ibm_nx842_incr_hist(local_devdata->counters->decomp_times, + (get_tb() - start_time) / tb_ticks_per_usec); + } + + rcu_read_unlock(); + return ret; +} +EXPORT_SYMBOL_GPL(nx842_decompress); + +/** + * nx842_OF_set_defaults -- Set default (disabled) values for devdata + * + * @devdata - struct nx842_devdata to update + * + * Returns: + * 0 on success + * -ENOENT if @devdata ptr is NULL + */ +static int nx842_OF_set_defaults(struct nx842_devdata *devdata) +{ + if (devdata) { + devdata->max_sync_size = 0; + devdata->max_sync_sg = 0; + devdata->max_sg_len = 0; + devdata->status = UNAVAILABLE; + return 0; + } else + return -ENOENT; +} + +/** + * nx842_OF_upd_status -- Update the device info from OF status prop + * + * The status property indicates if the accelerator is enabled. If the + * device is in the OF tree it indicates that the hardware is present. + * The status field indicates if the device is enabled when the status + * is 'okay'. Otherwise the device driver will be disabled. + * + * @devdata - struct nx842_devdata to update + * @prop - struct property point containing the maxsyncop for the update + * + * Returns: + * 0 - Device is available + * -EINVAL - Device is not available + */ +static int nx842_OF_upd_status(struct nx842_devdata *devdata, + struct property *prop) { + int ret = 0; + const char *status = (const char *)prop->value; + + if (!strncmp(status, "okay", (size_t)prop->length)) { + devdata->status = AVAILABLE; + } else { + dev_info(devdata->dev, "%s: status '%s' is not 'okay'\n", + __func__, status); + devdata->status = UNAVAILABLE; + } + + return ret; +} + +/** + * nx842_OF_upd_maxsglen -- Update the device info from OF maxsglen prop + * + * Definition of the 'ibm,max-sg-len' OF property: + * This field indicates the maximum byte length of a scatter list + * for the platform facility. It is a single cell encoded as with encode-int. 
+ * + * Example: + * # od -x ibm,max-sg-len + * 0000000 0000 0ff0 + * + * In this example, the maximum byte length of a scatter list is + * 0x0ff0 (4,080). + * + * @devdata - struct nx842_devdata to update + * @prop - struct property point containing the maxsyncop for the update + * + * Returns: + * 0 on success + * -EINVAL on failure + */ +static int nx842_OF_upd_maxsglen(struct nx842_devdata *devdata, + struct property *prop) { + int ret = 0; + const int *maxsglen = prop->value; + + if (prop->length != sizeof(*maxsglen)) { + dev_err(devdata->dev, "%s: unexpected format for ibm,max-sg-len property\n", __func__); + dev_dbg(devdata->dev, "%s: ibm,max-sg-len is %d bytes long, expected %lu bytes\n", __func__, + prop->length, sizeof(*maxsglen)); + ret = -EINVAL; + } else { + devdata->max_sg_len = (unsigned int)min(*maxsglen, + (int)NX842_HW_PAGE_SIZE); + } + + return ret; +} + +/** + * nx842_OF_upd_maxsyncop -- Update the device info from OF maxsyncop prop + * + * Definition of the 'ibm,max-sync-cop' OF property: + * Two series of cells. The first series of cells represents the maximums + * that can be synchronously compressed. The second series of cells + * represents the maximums that can be synchronously decompressed. + * 1. The first cell in each series contains the count of the number of + * data length, scatter list elements pairs that follow – each being + * of the form + * a. One cell data byte length + * b. One cell total number of scatter list elements + * + * Example: + * # od -x ibm,max-sync-cop + * 0000000 0000 0001 0000 1000 0000 01fe 0000 0001 + * 0000020 0000 1000 0000 01fe + * + * In this example, compression supports 0x1000 (4,096) data byte length + * and 0x1fe (510) total scatter list elements. Decompression supports + * 0x1000 (4,096) data byte length and 0x1f3 (510) total scatter list + * elements. + * + * @devdata - struct nx842_devdata to update + * @prop - struct property point containing the maxsyncop for the update + * + * Returns: + * 0 on success + * -EINVAL on failure + */ +static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata, + struct property *prop) { + int ret = 0; + const struct maxsynccop_t { + int comp_elements; + int comp_data_limit; + int comp_sg_limit; + int decomp_elements; + int decomp_data_limit; + int decomp_sg_limit; + } *maxsynccop; + + if (prop->length != sizeof(*maxsynccop)) { + dev_err(devdata->dev, "%s: unexpected format for ibm,max-sync-cop property\n", __func__); + dev_dbg(devdata->dev, "%s: ibm,max-sync-cop is %d bytes long, expected %lu bytes\n", __func__, prop->length, + sizeof(*maxsynccop)); + ret = -EINVAL; + goto out; + } + + maxsynccop = (const struct maxsynccop_t *)prop->value; + + /* Use one limit rather than separate limits for compression and + * decompression. 
Set a maximum for this so as not to exceed the + * size that the header can support and round the value down to + * the hardware page size (4K) */ + devdata->max_sync_size = + (unsigned int)min(maxsynccop->comp_data_limit, + maxsynccop->decomp_data_limit); + + devdata->max_sync_size = min_t(unsigned int, devdata->max_sync_size, + SIZE_64K); + + if (devdata->max_sync_size < SIZE_4K) { + dev_err(devdata->dev, "%s: hardware max data size (%u) is " + "less than the driver minimum, unable to use " + "the hardware device\n", + __func__, devdata->max_sync_size); + ret = -EINVAL; + goto out; + } + + devdata->max_sync_sg = (unsigned int)min(maxsynccop->comp_sg_limit, + maxsynccop->decomp_sg_limit); + if (devdata->max_sync_sg < 1) { + dev_err(devdata->dev, "%s: hardware max sg size (%u) is " + "less than the driver minimum, unable to use " + "the hardware device\n", + __func__, devdata->max_sync_sg); + ret = -EINVAL; + goto out; + } + +out: + return ret; +} + +/** + * + * nx842_OF_upd -- Handle OF properties updates for the device. + * + * Set all properties from the OF tree. Optionally, a new property + * can be provided by the @new_prop pointer to overwrite an existing value. + * The device will remain disabled until all values are valid, this function + * will return an error for updates unless all values are valid. + * + * @new_prop: If not NULL, this property is being updated. If NULL, update + * all properties from the current values in the OF tree. + * + * Returns: + * 0 - Success + * -ENOMEM - Could not allocate memory for new devdata structure + * -EINVAL - property value not found, new_prop is not a recognized + * property for the device or property value is not valid. + * -ENODEV - Device is not available + */ +static int nx842_OF_upd(struct property *new_prop) +{ + struct nx842_devdata *old_devdata = NULL; + struct nx842_devdata *new_devdata = NULL; + struct device_node *of_node = NULL; + struct property *status = NULL; + struct property *maxsglen = NULL; + struct property *maxsyncop = NULL; + int ret = 0; + unsigned long flags; + + spin_lock_irqsave(&devdata_mutex, flags); + old_devdata = rcu_dereference_check(devdata, + lockdep_is_held(&devdata_mutex)); + if (old_devdata) + of_node = old_devdata->dev->of_node; + + if (!old_devdata || !of_node) { + pr_err("%s: device is not available\n", __func__); + spin_unlock_irqrestore(&devdata_mutex, flags); + return -ENODEV; + } + + new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS); + if (!new_devdata) { + dev_err(old_devdata->dev, "%s: Could not allocate memory for device data\n", __func__); + ret = -ENOMEM; + goto error_out; + } + + memcpy(new_devdata, old_devdata, sizeof(*old_devdata)); + new_devdata->counters = old_devdata->counters; + + /* Set ptrs for existing properties */ + status = of_find_property(of_node, "status", NULL); + maxsglen = of_find_property(of_node, "ibm,max-sg-len", NULL); + maxsyncop = of_find_property(of_node, "ibm,max-sync-cop", NULL); + if (!status || !maxsglen || !maxsyncop) { + dev_err(old_devdata->dev, "%s: Could not locate device properties\n", __func__); + ret = -EINVAL; + goto error_out; + } + + /* + * If this is a property update, there are only certain properties that + * we care about. 
Bail if it isn't in the below list + */ + if (new_prop && (strncmp(new_prop->name, "status", new_prop->length) || + strncmp(new_prop->name, "ibm,max-sg-len", new_prop->length) || + strncmp(new_prop->name, "ibm,max-sync-cop", new_prop->length))) + goto out; + + /* Perform property updates */ + ret = nx842_OF_upd_status(new_devdata, status); + if (ret) + goto error_out; + + ret = nx842_OF_upd_maxsglen(new_devdata, maxsglen); + if (ret) + goto error_out; + + ret = nx842_OF_upd_maxsyncop(new_devdata, maxsyncop); + if (ret) + goto error_out; + +out: + dev_info(old_devdata->dev, "%s: max_sync_size new:%u old:%u\n", + __func__, new_devdata->max_sync_size, + old_devdata->max_sync_size); + dev_info(old_devdata->dev, "%s: max_sync_sg new:%u old:%u\n", + __func__, new_devdata->max_sync_sg, + old_devdata->max_sync_sg); + dev_info(old_devdata->dev, "%s: max_sg_len new:%u old:%u\n", + __func__, new_devdata->max_sg_len, + old_devdata->max_sg_len); + + rcu_assign_pointer(devdata, new_devdata); + spin_unlock_irqrestore(&devdata_mutex, flags); + synchronize_rcu(); + dev_set_drvdata(new_devdata->dev, new_devdata); + kfree(old_devdata); + return 0; + +error_out: + if (new_devdata) { + dev_info(old_devdata->dev, "%s: device disabled\n", __func__); + nx842_OF_set_defaults(new_devdata); + rcu_assign_pointer(devdata, new_devdata); + spin_unlock_irqrestore(&devdata_mutex, flags); + synchronize_rcu(); + dev_set_drvdata(new_devdata->dev, new_devdata); + kfree(old_devdata); + } else { + dev_err(old_devdata->dev, "%s: could not update driver from hardware\n", __func__); + spin_unlock_irqrestore(&devdata_mutex, flags); + } + + if (!ret) + ret = -EINVAL; + return ret; +} + +/** + * nx842_OF_notifier - Process updates to OF properties for the device + * + * @np: notifier block + * @action: notifier action + * @update: struct pSeries_reconfig_prop_update pointer if action is + * PSERIES_UPDATE_PROPERTY + * + * Returns: + * NOTIFY_OK on success + * NOTIFY_BAD encoded with error number on failure, use + * notifier_to_errno() to decode this value + */ +static int nx842_OF_notifier(struct notifier_block *np, unsigned long action, + void *data) +{ + struct of_reconfig_data *upd = data; + struct nx842_devdata *local_devdata; + struct device_node *node = NULL; + + rcu_read_lock(); + local_devdata = rcu_dereference(devdata); + if (local_devdata) + node = local_devdata->dev->of_node; + + if (local_devdata && + action == OF_RECONFIG_UPDATE_PROPERTY && + !strcmp(upd->dn->name, node->name)) { + rcu_read_unlock(); + nx842_OF_upd(upd->prop); + } else + rcu_read_unlock(); + + return NOTIFY_OK; +} + +static struct notifier_block nx842_of_nb = { + .notifier_call = nx842_OF_notifier, +}; + +#define nx842_counter_read(_name) \ +static ssize_t nx842_##_name##_show(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) { \ + struct nx842_devdata *local_devdata; \ + int p = 0; \ + rcu_read_lock(); \ + local_devdata = rcu_dereference(devdata); \ + if (local_devdata) \ + p = snprintf(buf, PAGE_SIZE, "%ld\n", \ + atomic64_read(&local_devdata->counters->_name)); \ + rcu_read_unlock(); \ + return p; \ +} + +#define NX842DEV_COUNTER_ATTR_RO(_name) \ + nx842_counter_read(_name); \ + static struct device_attribute dev_attr_##_name = __ATTR(_name, \ + 0444, \ + nx842_##_name##_show,\ + NULL); + +NX842DEV_COUNTER_ATTR_RO(comp_complete); +NX842DEV_COUNTER_ATTR_RO(comp_failed); +NX842DEV_COUNTER_ATTR_RO(decomp_complete); +NX842DEV_COUNTER_ATTR_RO(decomp_failed); +NX842DEV_COUNTER_ATTR_RO(swdecomp); + +static ssize_t 
nx842_timehist_show(struct device *, + struct device_attribute *, char *); + +static struct device_attribute dev_attr_comp_times = __ATTR(comp_times, 0444, + nx842_timehist_show, NULL); +static struct device_attribute dev_attr_decomp_times = __ATTR(decomp_times, + 0444, nx842_timehist_show, NULL); + +static ssize_t nx842_timehist_show(struct device *dev, + struct device_attribute *attr, char *buf) { + char *p = buf; + struct nx842_devdata *local_devdata; + atomic64_t *times; + int bytes_remain = PAGE_SIZE; + int bytes; + int i; + + rcu_read_lock(); + local_devdata = rcu_dereference(devdata); + if (!local_devdata) { + rcu_read_unlock(); + return 0; + } + + if (attr == &dev_attr_comp_times) + times = local_devdata->counters->comp_times; + else if (attr == &dev_attr_decomp_times) + times = local_devdata->counters->decomp_times; + else { + rcu_read_unlock(); + return 0; + } + + for (i = 0; i < (NX842_HIST_SLOTS - 2); i++) { + bytes = snprintf(p, bytes_remain, "%u-%uus:\t%ld\n", + i ? (2<<(i-1)) : 0, (2<vdev != NULL) { + dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__); + ret = -1; + goto error_unlock; + } + + dev_set_drvdata(&viodev->dev, NULL); + + new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS); + if (!new_devdata) { + dev_err(&viodev->dev, "%s: Could not allocate memory for device data\n", __func__); + ret = -ENOMEM; + goto error_unlock; + } + + new_devdata->counters = kzalloc(sizeof(*new_devdata->counters), + GFP_NOFS); + if (!new_devdata->counters) { + dev_err(&viodev->dev, "%s: Could not allocate memory for performance counters\n", __func__); + ret = -ENOMEM; + goto error_unlock; + } + + new_devdata->vdev = viodev; + new_devdata->dev = &viodev->dev; + nx842_OF_set_defaults(new_devdata); + + rcu_assign_pointer(devdata, new_devdata); + spin_unlock_irqrestore(&devdata_mutex, flags); + synchronize_rcu(); + kfree(old_devdata); + + of_reconfig_notifier_register(&nx842_of_nb); + + ret = nx842_OF_upd(NULL); + if (ret && ret != -ENODEV) { + dev_err(&viodev->dev, "could not parse device tree. 
%d\n", ret); + ret = -1; + goto error; + } + + rcu_read_lock(); + dev_set_drvdata(&viodev->dev, rcu_dereference(devdata)); + rcu_read_unlock(); + + if (sysfs_create_group(&viodev->dev.kobj, &nx842_attribute_group)) { + dev_err(&viodev->dev, "could not create sysfs device attributes\n"); + ret = -1; + goto error; + } + + return 0; + +error_unlock: + spin_unlock_irqrestore(&devdata_mutex, flags); + if (new_devdata) + kfree(new_devdata->counters); + kfree(new_devdata); +error: + return ret; +} + +static int __exit nx842_remove(struct vio_dev *viodev) +{ + struct nx842_devdata *old_devdata; + unsigned long flags; + + pr_info("Removing IBM Power 842 compression device\n"); + sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group); + + spin_lock_irqsave(&devdata_mutex, flags); + old_devdata = rcu_dereference_check(devdata, + lockdep_is_held(&devdata_mutex)); + of_reconfig_notifier_unregister(&nx842_of_nb); + RCU_INIT_POINTER(devdata, NULL); + spin_unlock_irqrestore(&devdata_mutex, flags); + synchronize_rcu(); + dev_set_drvdata(&viodev->dev, NULL); + if (old_devdata) + kfree(old_devdata->counters); + kfree(old_devdata); + return 0; +} + +static struct vio_device_id nx842_driver_ids[] = { + {"ibm,compression-v1", "ibm,compression"}, + {"", ""}, +}; + +static struct vio_driver nx842_driver = { + .name = MODULE_NAME, + .probe = nx842_probe, + .remove = __exit_p(nx842_remove), + .get_desired_dma = nx842_get_desired_dma, + .id_table = nx842_driver_ids, +}; + +static int __init nx842_init(void) +{ + struct nx842_devdata *new_devdata; + pr_info("Registering IBM Power 842 compression driver\n"); + + RCU_INIT_POINTER(devdata, NULL); + new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL); + if (!new_devdata) { + pr_err("Could not allocate memory for device data\n"); + return -ENOMEM; + } + new_devdata->status = UNAVAILABLE; + RCU_INIT_POINTER(devdata, new_devdata); + + return vio_register_driver(&nx842_driver); +} + +module_init(nx842_init); + +static void __exit nx842_exit(void) +{ + struct nx842_devdata *old_devdata; + unsigned long flags; + + pr_info("Exiting IBM Power 842 compression driver\n"); + spin_lock_irqsave(&devdata_mutex, flags); + old_devdata = rcu_dereference_check(devdata, + lockdep_is_held(&devdata_mutex)); + RCU_INIT_POINTER(devdata, NULL); + spin_unlock_irqrestore(&devdata_mutex, flags); + synchronize_rcu(); + if (old_devdata) + dev_set_drvdata(old_devdata->dev, NULL); + kfree(old_devdata); + vio_unregister_driver(&nx842_driver); +} + +module_exit(nx842_exit); + +/********************************* + * 842 software decompressor +*********************************/ +typedef int (*sw842_template_op)(const char **, int *, unsigned char **, + struct sw842_fifo *); + +static int sw842_data8(const char **, int *, unsigned char **, + struct sw842_fifo *); +static int sw842_data4(const char **, int *, unsigned char **, + struct sw842_fifo *); +static int sw842_data2(const char **, int *, unsigned char **, + struct sw842_fifo *); +static int sw842_ptr8(const char **, int *, unsigned char **, + struct sw842_fifo *); +static int sw842_ptr4(const char **, int *, unsigned char **, + struct sw842_fifo *); +static int sw842_ptr2(const char **, int *, unsigned char **, + struct sw842_fifo *); + +/* special templates */ +#define SW842_TMPL_REPEAT 0x1B +#define SW842_TMPL_ZEROS 0x1C +#define SW842_TMPL_EOF 0x1E + +static sw842_template_op sw842_tmpl_ops[26][4] = { + { sw842_data8, NULL}, /* 0 (00000) */ + { sw842_data4, sw842_data2, sw842_ptr2, NULL}, + { sw842_data4, sw842_ptr2, sw842_data2, 
NULL}, + { sw842_data4, sw842_ptr2, sw842_ptr2, NULL}, + { sw842_data4, sw842_ptr4, NULL}, + { sw842_data2, sw842_ptr2, sw842_data4, NULL}, + { sw842_data2, sw842_ptr2, sw842_data2, sw842_ptr2}, + { sw842_data2, sw842_ptr2, sw842_ptr2, sw842_data2}, + { sw842_data2, sw842_ptr2, sw842_ptr2, sw842_ptr2,}, + { sw842_data2, sw842_ptr2, sw842_ptr4, NULL}, + { sw842_ptr2, sw842_data2, sw842_data4, NULL}, /* 10 (01010) */ + { sw842_ptr2, sw842_data4, sw842_ptr2, NULL}, + { sw842_ptr2, sw842_data2, sw842_ptr2, sw842_data2}, + { sw842_ptr2, sw842_data2, sw842_ptr2, sw842_ptr2}, + { sw842_ptr2, sw842_data2, sw842_ptr4, NULL}, + { sw842_ptr2, sw842_ptr2, sw842_data4, NULL}, + { sw842_ptr2, sw842_ptr2, sw842_data2, sw842_ptr2}, + { sw842_ptr2, sw842_ptr2, sw842_ptr2, sw842_data2}, + { sw842_ptr2, sw842_ptr2, sw842_ptr2, sw842_ptr2}, + { sw842_ptr2, sw842_ptr2, sw842_ptr4, NULL}, + { sw842_ptr4, sw842_data4, NULL}, /* 20 (10100) */ + { sw842_ptr4, sw842_data2, sw842_ptr2, NULL}, + { sw842_ptr4, sw842_ptr2, sw842_data2, NULL}, + { sw842_ptr4, sw842_ptr2, sw842_ptr2, NULL}, + { sw842_ptr4, sw842_ptr4, NULL}, + { sw842_ptr8, NULL} +}; + +/* Software decompress helpers */ + +static uint8_t sw842_get_byte(const char *buf, int bit) +{ + uint8_t tmpl; + uint16_t tmp; + tmp = htons(*(uint16_t *)(buf)); + tmp = (uint16_t)(tmp << bit); + tmp = ntohs(tmp); + memcpy(&tmpl, &tmp, 1); + return tmpl; +} + +static uint8_t sw842_get_template(const char **buf, int *bit) +{ + uint8_t byte; + byte = sw842_get_byte(*buf, *bit); + byte = byte >> 3; + byte &= 0x1F; + *buf += (*bit + 5) / 8; + *bit = (*bit + 5) % 8; + return byte; +} + +/* repeat_count happens to be 5-bit too (like the template) */ +static uint8_t sw842_get_repeat_count(const char **buf, int *bit) +{ + uint8_t byte; + byte = sw842_get_byte(*buf, *bit); + byte = byte >> 2; + byte &= 0x3F; + *buf += (*bit + 6) / 8; + *bit = (*bit + 6) % 8; + return byte; +} + +static uint8_t sw842_get_ptr2(const char **buf, int *bit) +{ + uint8_t ptr; + ptr = sw842_get_byte(*buf, *bit); + (*buf)++; + return ptr; +} + +static uint16_t sw842_get_ptr4(const char **buf, int *bit, + struct sw842_fifo *fifo) +{ + uint16_t ptr; + ptr = htons(*(uint16_t *)(*buf)); + ptr = (uint16_t)(ptr << *bit); + ptr = ptr >> 7; + ptr &= 0x01FF; + *buf += (*bit + 9) / 8; + *bit = (*bit + 9) % 8; + return ptr; +} + +static uint8_t sw842_get_ptr8(const char **buf, int *bit, + struct sw842_fifo *fifo) +{ + return sw842_get_ptr2(buf, bit); +} + +/* Software decompress template ops */ + +static int sw842_data8(const char **inbuf, int *inbit, + unsigned char **outbuf, struct sw842_fifo *fifo) +{ + int ret; + + ret = sw842_data4(inbuf, inbit, outbuf, fifo); + if (ret) + return ret; + ret = sw842_data4(inbuf, inbit, outbuf, fifo); + return ret; +} + +static int sw842_data4(const char **inbuf, int *inbit, + unsigned char **outbuf, struct sw842_fifo *fifo) +{ + int ret; + + ret = sw842_data2(inbuf, inbit, outbuf, fifo); + if (ret) + return ret; + ret = sw842_data2(inbuf, inbit, outbuf, fifo); + return ret; +} + +static int sw842_data2(const char **inbuf, int *inbit, + unsigned char **outbuf, struct sw842_fifo *fifo) +{ + **outbuf = sw842_get_byte(*inbuf, *inbit); + (*inbuf)++; + (*outbuf)++; + **outbuf = sw842_get_byte(*inbuf, *inbit); + (*inbuf)++; + (*outbuf)++; + return 0; +} + +static int sw842_ptr8(const char **inbuf, int *inbit, + unsigned char **outbuf, struct sw842_fifo *fifo) +{ + uint8_t ptr; + ptr = sw842_get_ptr8(inbuf, inbit, fifo); + if (!fifo->f84_full && (ptr >= fifo->f8_count)) + return 1; + 
memcpy(*outbuf, fifo->f8[ptr], 8); + *outbuf += 8; + return 0; +} + +static int sw842_ptr4(const char **inbuf, int *inbit, + unsigned char **outbuf, struct sw842_fifo *fifo) +{ + uint16_t ptr; + ptr = sw842_get_ptr4(inbuf, inbit, fifo); + if (!fifo->f84_full && (ptr >= fifo->f4_count)) + return 1; + memcpy(*outbuf, fifo->f4[ptr], 4); + *outbuf += 4; + return 0; +} + +static int sw842_ptr2(const char **inbuf, int *inbit, + unsigned char **outbuf, struct sw842_fifo *fifo) +{ + uint8_t ptr; + ptr = sw842_get_ptr2(inbuf, inbit); + if (!fifo->f2_full && (ptr >= fifo->f2_count)) + return 1; + memcpy(*outbuf, fifo->f2[ptr], 2); + *outbuf += 2; + return 0; +} + +static void sw842_copy_to_fifo(const char *buf, struct sw842_fifo *fifo) +{ + unsigned char initial_f2count = fifo->f2_count; + + memcpy(fifo->f8[fifo->f8_count], buf, 8); + fifo->f4_count += 2; + fifo->f8_count += 1; + + if (!fifo->f84_full && fifo->f4_count >= 512) { + fifo->f84_full = 1; + fifo->f4_count /= 512; + } + + memcpy(fifo->f2[fifo->f2_count++], buf, 2); + memcpy(fifo->f2[fifo->f2_count++], buf + 2, 2); + memcpy(fifo->f2[fifo->f2_count++], buf + 4, 2); + memcpy(fifo->f2[fifo->f2_count++], buf + 6, 2); + if (fifo->f2_count < initial_f2count) + fifo->f2_full = 1; +} + +static int sw842_decompress(const unsigned char *src, int srclen, + unsigned char *dst, int *destlen, + const void *wrkmem) +{ + uint8_t tmpl; + const char *inbuf; + int inbit = 0; + unsigned char *outbuf, *outbuf_end, *origbuf, *prevbuf; + const char *inbuf_end; + sw842_template_op op; + int opindex; + int i, repeat_count; + struct sw842_fifo *fifo; + int ret = 0; + + fifo = &((struct nx842_workmem *)(wrkmem))->swfifo; + memset(fifo, 0, sizeof(*fifo)); + + origbuf = NULL; + inbuf = src; + inbuf_end = src + srclen; + outbuf = dst; + outbuf_end = dst + *destlen; + + while ((tmpl = sw842_get_template(&inbuf, &inbit)) != SW842_TMPL_EOF) { + if (inbuf >= inbuf_end) { + ret = -EINVAL; + goto out; + } + + opindex = 0; + prevbuf = origbuf; + origbuf = outbuf; + switch (tmpl) { + case SW842_TMPL_REPEAT: + if (prevbuf == NULL) { + ret = -EINVAL; + goto out; + } + + repeat_count = sw842_get_repeat_count(&inbuf, + &inbit) + 1; + + /* Did the repeat count advance past the end of input */ + if (inbuf > inbuf_end) { + ret = -EINVAL; + goto out; + } + + for (i = 0; i < repeat_count; i++) { + /* Would this overflow the output buffer */ + if ((outbuf + 8) > outbuf_end) { + ret = -ENOSPC; + goto out; + } + + memcpy(outbuf, prevbuf, 8); + sw842_copy_to_fifo(outbuf, fifo); + outbuf += 8; + } + break; + + case SW842_TMPL_ZEROS: + /* Would this overflow the output buffer */ + if ((outbuf + 8) > outbuf_end) { + ret = -ENOSPC; + goto out; + } + + memset(outbuf, 0, 8); + sw842_copy_to_fifo(outbuf, fifo); + outbuf += 8; + break; + + default: + if (tmpl > 25) { + ret = -EINVAL; + goto out; + } + + /* Does this go past the end of the input buffer */ + if ((inbuf + 2) > inbuf_end) { + ret = -EINVAL; + goto out; + } + + /* Would this overflow the output buffer */ + if ((outbuf + 8) > outbuf_end) { + ret = -ENOSPC; + goto out; + } + + while (opindex < 4 && + (op = sw842_tmpl_ops[tmpl][opindex++]) + != NULL) { + ret = (*op)(&inbuf, &inbit, &outbuf, fifo); + if (ret) { + ret = -EINVAL; + goto out; + } + sw842_copy_to_fifo(origbuf, fifo); + } + } + } + +out: + if (!ret) + *destlen = (unsigned int)(outbuf - dst); + else + *destlen = 0; + + return ret; +} diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c deleted file mode 100644 index 887196e9b50c..000000000000 --- 
a/drivers/crypto/nx/nx-842.c +++ /dev/null @@ -1,1603 +0,0 @@ -/* - * Driver for IBM Power 842 compression accelerator - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - * - * Copyright (C) IBM Corporation, 2012 - * - * Authors: Robert Jennings - * Seth Jennings - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/nx842.h> -#include <linux/of.h> -#include <linux/slab.h> - -#include <asm/page.h> -#include <asm/vio.h> - -#include "nx_csbcpb.h" /* struct nx_csbcpb */ - -#define MODULE_NAME "nx-compress" -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Robert Jennings "); -MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors"); - -#define SHIFT_4K 12 -#define SHIFT_64K 16 -#define SIZE_4K (1UL << SHIFT_4K) -#define SIZE_64K (1UL << SHIFT_64K) - -/* IO buffer must be 128 byte aligned */ -#define IO_BUFFER_ALIGN 128 - -struct nx842_header { - int blocks_nr; /* number of compressed blocks */ - int offset; /* offset of the first block (from beginning of header) */ - int sizes[0]; /* size of compressed blocks */ -}; - -static inline int nx842_header_size(const struct nx842_header *hdr) -{ - return sizeof(struct nx842_header) + - hdr->blocks_nr * sizeof(hdr->sizes[0]); -} - -/* Macros for fields within nx_csbcpb */ -/* Check the valid bit within the csbcpb valid field */ -#define NX842_CSBCBP_VALID_CHK(x) (x & BIT_MASK(7)) - -/* CE macros operate on the completion_extension field bits in the csbcpb.
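 - * These flags live in the top bits of the one-byte completion_extension - * field, hence the BIT_MASK(7)/(6)/(5) accessors below.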
- * CE0 0=full completion, 1=partial completion - * CE1 0=CE0 indicates completion, 1=termination (output may be modified) - * CE2 0=processed_bytes is source bytes, 1=processed_bytes is target bytes */ -#define NX842_CSBCPB_CE0(x) (x & BIT_MASK(7)) -#define NX842_CSBCPB_CE1(x) (x & BIT_MASK(6)) -#define NX842_CSBCPB_CE2(x) (x & BIT_MASK(5)) - -/* The NX unit accepts data only on 4K page boundaries */ -#define NX842_HW_PAGE_SHIFT SHIFT_4K -#define NX842_HW_PAGE_SIZE (ASM_CONST(1) << NX842_HW_PAGE_SHIFT) -#define NX842_HW_PAGE_MASK (~(NX842_HW_PAGE_SIZE-1)) - -enum nx842_status { - UNAVAILABLE, - AVAILABLE -}; - -struct ibm_nx842_counters { - atomic64_t comp_complete; - atomic64_t comp_failed; - atomic64_t decomp_complete; - atomic64_t decomp_failed; - atomic64_t swdecomp; - atomic64_t comp_times[32]; - atomic64_t decomp_times[32]; -}; - -static struct nx842_devdata { - struct vio_dev *vdev; - struct device *dev; - struct ibm_nx842_counters *counters; - unsigned int max_sg_len; - unsigned int max_sync_size; - unsigned int max_sync_sg; - enum nx842_status status; -} __rcu *devdata; -static DEFINE_SPINLOCK(devdata_mutex); - -#define NX842_COUNTER_INC(_x) \ -static inline void nx842_inc_##_x( \ - const struct nx842_devdata *dev) { \ - if (dev) \ - atomic64_inc(&dev->counters->_x); \ -} -NX842_COUNTER_INC(comp_complete); -NX842_COUNTER_INC(comp_failed); -NX842_COUNTER_INC(decomp_complete); -NX842_COUNTER_INC(decomp_failed); -NX842_COUNTER_INC(swdecomp); - -#define NX842_HIST_SLOTS 16 - -static void ibm_nx842_incr_hist(atomic64_t *times, unsigned int time) -{ - int bucket = fls(time); - - if (bucket) - bucket = min((NX842_HIST_SLOTS - 1), bucket - 1); - - atomic64_inc(&times[bucket]); -} - -/* NX unit operation flags */ -#define NX842_OP_COMPRESS 0x0 -#define NX842_OP_CRC 0x1 -#define NX842_OP_DECOMPRESS 0x2 -#define NX842_OP_COMPRESS_CRC (NX842_OP_COMPRESS | NX842_OP_CRC) -#define NX842_OP_DECOMPRESS_CRC (NX842_OP_DECOMPRESS | NX842_OP_CRC) -#define NX842_OP_ASYNC (1<<23) -#define NX842_OP_NOTIFY (1<<22) -#define NX842_OP_NOTIFY_INT(x) ((x & 0xff)<<8) - -static unsigned long nx842_get_desired_dma(struct vio_dev *viodev) -{ - /* No use of DMA mappings within the driver.
*/ - return 0; -} - -struct nx842_slentry { - unsigned long ptr; /* Real address (use __pa()) */ - unsigned long len; -}; - -/* pHyp scatterlist entry */ -struct nx842_scatterlist { - int entry_nr; /* number of slentries */ - struct nx842_slentry *entries; /* ptr to array of slentries */ -}; - -/* Does not include sizeof(entry_nr) in the size */ -static inline unsigned long nx842_get_scatterlist_size( - struct nx842_scatterlist *sl) -{ - return sl->entry_nr * sizeof(struct nx842_slentry); -} - -static inline unsigned long nx842_get_pa(void *addr) -{ - if (is_vmalloc_addr(addr)) - return page_to_phys(vmalloc_to_page(addr)) - + offset_in_page(addr); - else - return __pa(addr); -} - -static int nx842_build_scatterlist(unsigned long buf, int len, - struct nx842_scatterlist *sl) -{ - unsigned long nextpage; - struct nx842_slentry *entry; - - sl->entry_nr = 0; - - entry = sl->entries; - while (len) { - entry->ptr = nx842_get_pa((void *)buf); - nextpage = ALIGN(buf + 1, NX842_HW_PAGE_SIZE); - if (nextpage < buf + len) { - /* we aren't at the end yet */ - if (IS_ALIGNED(buf, NX842_HW_PAGE_SIZE)) - /* we are in the middle (or beginning) */ - entry->len = NX842_HW_PAGE_SIZE; - else - /* we are at the beginning */ - entry->len = nextpage - buf; - } else { - /* at the end */ - entry->len = len; - } - - len -= entry->len; - buf += entry->len; - sl->entry_nr++; - entry++; - } - - return 0; -} - -/* - * Working memory for software decompression - */ -struct sw842_fifo { - union { - char f8[256][8]; - char f4[512][4]; - }; - char f2[256][2]; - unsigned char f84_full; - unsigned char f2_full; - unsigned char f8_count; - unsigned char f2_count; - unsigned int f4_count; -}; - -/* - * Working memory for crypto API - */ -struct nx842_workmem { - char bounce[PAGE_SIZE]; /* bounce buffer for decompression input */ - union { - /* hardware working memory */ - struct { - /* scatterlist */ - char slin[SIZE_4K]; - char slout[SIZE_4K]; - /* coprocessor status/parameter block */ - struct nx_csbcpb csbcpb; - }; - /* software working memory */ - struct sw842_fifo swfifo; /* software decompression fifo */ - }; -}; - -int nx842_get_workmem_size(void) -{ - return sizeof(struct nx842_workmem) + NX842_HW_PAGE_SIZE; -} -EXPORT_SYMBOL_GPL(nx842_get_workmem_size); - -int nx842_get_workmem_size_aligned(void) -{ - return sizeof(struct nx842_workmem); -} -EXPORT_SYMBOL_GPL(nx842_get_workmem_size_aligned); - -static int nx842_validate_result(struct device *dev, - struct cop_status_block *csb) -{ - /* The csb must be valid after returning from vio_h_cop_sync */ - if (!NX842_CSBCBP_VALID_CHK(csb->valid)) { - dev_err(dev, "%s: cspcbp not valid upon completion.\n", - __func__); - dev_dbg(dev, "valid:0x%02x cs:0x%02x cc:0x%02x ce:0x%02x\n", - csb->valid, - csb->crb_seq_number, - csb->completion_code, - csb->completion_extension); - dev_dbg(dev, "processed_bytes:%d address:0x%016lx\n", - csb->processed_byte_count, - (unsigned long)csb->address); - return -EIO; - } - - /* Check return values from the hardware in the CSB */ - switch (csb->completion_code) { - case 0: /* Completed without error */ - break; - case 64: /* Target bytes > Source bytes during compression */ - case 13: /* Output buffer too small */ - dev_dbg(dev, "%s: Compression output larger than input\n", - __func__); - return -ENOSPC; - case 66: /* Input data contains an illegal template field */ - case 67: /* Template indicates data past the end of the input stream */ - dev_dbg(dev, "%s: Bad data for decompression (code:%d)\n", - __func__, csb->completion_code); - return 
-EINVAL; - default: - dev_dbg(dev, "%s: Unspecified error (code:%d)\n", - __func__, csb->completion_code); - return -EIO; - } - - /* Hardware sanity check */ - if (!NX842_CSBCPB_CE2(csb->completion_extension)) { - dev_err(dev, "%s: No error returned by hardware, but " - "data returned is unusable, contact support.\n" - "(Additional info: csbcbp->processed bytes " - "does not specify processed bytes for the " - "target buffer.)\n", __func__); - return -EIO; - } - - return 0; -} - -/** - * nx842_compress - Compress data using the 842 algorithm - * - * Compression provide by the NX842 coprocessor on IBM Power systems. - * The input buffer is compressed and the result is stored in the - * provided output buffer. - * - * Upon return from this function @outlen contains the length of the - * compressed data. If there is an error then @outlen will be 0 and an - * error will be specified by the return code from this function. - * - * @in: Pointer to input buffer, must be page aligned - * @inlen: Length of input buffer, must be PAGE_SIZE - * @out: Pointer to output buffer - * @outlen: Length of output buffer - * @wrkmem: ptr to buffer for working memory, size determined by - * nx842_get_workmem_size() - * - * Returns: - * 0 Success, output of length @outlen stored in the buffer at @out - * -ENOMEM Unable to allocate internal buffers - * -ENOSPC Output buffer is to small - * -EMSGSIZE XXX Difficult to describe this limitation - * -EIO Internal error - * -ENODEV Hardware unavailable - */ -int nx842_compress(const unsigned char *in, unsigned int inlen, - unsigned char *out, unsigned int *outlen, void *wmem) -{ - struct nx842_header *hdr; - struct nx842_devdata *local_devdata; - struct device *dev = NULL; - struct nx842_workmem *workmem; - struct nx842_scatterlist slin, slout; - struct nx_csbcpb *csbcpb; - int ret = 0, max_sync_size, i, bytesleft, size, hdrsize; - unsigned long inbuf, outbuf, padding; - struct vio_pfo_op op = { - .done = NULL, - .handle = 0, - .timeout = 0, - }; - unsigned long start_time = get_tb(); - - /* - * Make sure input buffer is 64k page aligned. This is assumed since - * this driver is designed for page compression only (for now). This - * is very nice since we can now use direct DDE(s) for the input and - * the alignment is guaranteed. - */ - inbuf = (unsigned long)in; - if (!IS_ALIGNED(inbuf, PAGE_SIZE) || inlen != PAGE_SIZE) - return -EINVAL; - - rcu_read_lock(); - local_devdata = rcu_dereference(devdata); - if (!local_devdata || !local_devdata->dev) { - rcu_read_unlock(); - return -ENODEV; - } - max_sync_size = local_devdata->max_sync_size; - dev = local_devdata->dev; - - /* Create the header */ - hdr = (struct nx842_header *)out; - hdr->blocks_nr = PAGE_SIZE / max_sync_size; - hdrsize = nx842_header_size(hdr); - outbuf = (unsigned long)out + hdrsize; - bytesleft = *outlen - hdrsize; - - /* Init scatterlist */ - workmem = (struct nx842_workmem *)ALIGN((unsigned long)wmem, - NX842_HW_PAGE_SIZE); - slin.entries = (struct nx842_slentry *)workmem->slin; - slout.entries = (struct nx842_slentry *)workmem->slout; - - /* Init operation */ - op.flags = NX842_OP_COMPRESS; - csbcpb = &workmem->csbcpb; - memset(csbcpb, 0, sizeof(*csbcpb)); - op.csbcpb = nx842_get_pa(csbcpb); - op.out = nx842_get_pa(slout.entries); - - for (i = 0; i < hdr->blocks_nr; i++) { - /* - * Aligning the output blocks to 128 bytes does waste space, - * but it prevents the need for bounce buffers and memory - * copies. It also simplifies the code a lot. 
In the worst - * case (64k page, 4k max_sync_size), you lose up to - * (128*16)/64k = ~3% the compression factor. For 64k - * max_sync_size, the loss would be at most 128/64k = ~0.2%. - */ - padding = ALIGN(outbuf, IO_BUFFER_ALIGN) - outbuf; - outbuf += padding; - bytesleft -= padding; - if (i == 0) - /* save offset into first block in header */ - hdr->offset = padding + hdrsize; - - if (bytesleft <= 0) { - ret = -ENOSPC; - goto unlock; - } - - /* - * NOTE: If the default max_sync_size is changed from 4k - * to 64k, remove the "likely" case below, since a - * scatterlist will always be needed. - */ - if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) { - /* Create direct DDE */ - op.in = nx842_get_pa((void *)inbuf); - op.inlen = max_sync_size; - - } else { - /* Create indirect DDE (scatterlist) */ - nx842_build_scatterlist(inbuf, max_sync_size, &slin); - op.in = nx842_get_pa(slin.entries); - op.inlen = -nx842_get_scatterlist_size(&slin); - } - - /* - * If max_sync_size != NX842_HW_PAGE_SIZE, an indirect - * DDE is required for the outbuf. - * If max_sync_size == NX842_HW_PAGE_SIZE, outbuf must - * also be page aligned (1 in 128/4k=32 chance) in order - * to use a direct DDE. - * This is unlikely, just use an indirect DDE always. - */ - nx842_build_scatterlist(outbuf, - min(bytesleft, max_sync_size), &slout); - /* op.out set before loop */ - op.outlen = -nx842_get_scatterlist_size(&slout); - - /* Send request to pHyp */ - ret = vio_h_cop_sync(local_devdata->vdev, &op); - - /* Check for pHyp error */ - if (ret) { - dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n", - __func__, ret, op.hcall_err); - ret = -EIO; - goto unlock; - } - - /* Check for hardware error */ - ret = nx842_validate_result(dev, &csbcpb->csb); - if (ret && ret != -ENOSPC) - goto unlock; - - /* Handle incompressible data */ - if (unlikely(ret == -ENOSPC)) { - if (bytesleft < max_sync_size) { - /* - * Not enough space left in the output buffer - * to store uncompressed block - */ - goto unlock; - } else { - /* Store incompressible block */ - memcpy((void *)outbuf, (void *)inbuf, - max_sync_size); - hdr->sizes[i] = -max_sync_size; - outbuf += max_sync_size; - bytesleft -= max_sync_size; - /* Reset ret, incompressible data handled */ - ret = 0; - } - } else { - /* Normal case, compression was successful */ - size = csbcpb->csb.processed_byte_count; - dev_dbg(dev, "%s: processed_bytes=%d\n", - __func__, size); - hdr->sizes[i] = size; - outbuf += size; - bytesleft -= size; - } - - inbuf += max_sync_size; - } - - *outlen = (unsigned int)(outbuf - (unsigned long)out); - -unlock: - if (ret) - nx842_inc_comp_failed(local_devdata); - else { - nx842_inc_comp_complete(local_devdata); - ibm_nx842_incr_hist(local_devdata->counters->comp_times, - (get_tb() - start_time) / tb_ticks_per_usec); - } - rcu_read_unlock(); - return ret; -} -EXPORT_SYMBOL_GPL(nx842_compress); - -static int sw842_decompress(const unsigned char *, int, unsigned char *, int *, - const void *); - -/** - * nx842_decompress - Decompress data using the 842 algorithm - * - * Decompression provide by the NX842 coprocessor on IBM Power systems. - * The input buffer is decompressed and the result is stored in the - * provided output buffer. The size allocated to the output buffer is - * provided by the caller of this function in @outlen. Upon return from - * this function @outlen contains the length of the decompressed data. - * If there is an error then @outlen will be 0 and an error will be - * specified by the return code from this function. 
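 - * The data at @in is expected to begin with the struct nx842_header that - * nx842_compress() wrote: hdr->offset locates the first compressed block, - * and negative hdr->sizes[] entries mark blocks that were stored - * uncompressed.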
- * - * @in: Pointer to input buffer, will use bounce buffer if not 128 byte - * aligned - * @inlen: Length of input buffer - * @out: Pointer to output buffer, must be page aligned - * @outlen: Length of output buffer, must be PAGE_SIZE - * @wrkmem: ptr to buffer for working memory, size determined by - * nx842_get_workmem_size() - * - * Returns: - * 0 Success, output of length @outlen stored in the buffer at @out - * -ENODEV Hardware decompression device is unavailable - * -ENOMEM Unable to allocate internal buffers - * -ENOSPC Output buffer is to small - * -EINVAL Bad input data encountered when attempting decompress - * -EIO Internal error - */ -int nx842_decompress(const unsigned char *in, unsigned int inlen, - unsigned char *out, unsigned int *outlen, void *wmem) -{ - struct nx842_header *hdr; - struct nx842_devdata *local_devdata; - struct device *dev = NULL; - struct nx842_workmem *workmem; - struct nx842_scatterlist slin, slout; - struct nx_csbcpb *csbcpb; - int ret = 0, i, size, max_sync_size; - unsigned long inbuf, outbuf; - struct vio_pfo_op op = { - .done = NULL, - .handle = 0, - .timeout = 0, - }; - unsigned long start_time = get_tb(); - - /* Ensure page alignment and size */ - outbuf = (unsigned long)out; - if (!IS_ALIGNED(outbuf, PAGE_SIZE) || *outlen != PAGE_SIZE) - return -EINVAL; - - rcu_read_lock(); - local_devdata = rcu_dereference(devdata); - if (local_devdata) - dev = local_devdata->dev; - - /* Get header */ - hdr = (struct nx842_header *)in; - - workmem = (struct nx842_workmem *)ALIGN((unsigned long)wmem, - NX842_HW_PAGE_SIZE); - - inbuf = (unsigned long)in + hdr->offset; - if (likely(!IS_ALIGNED(inbuf, IO_BUFFER_ALIGN))) { - /* Copy block(s) into bounce buffer for alignment */ - memcpy(workmem->bounce, in + hdr->offset, inlen - hdr->offset); - inbuf = (unsigned long)workmem->bounce; - } - - /* Init scatterlist */ - slin.entries = (struct nx842_slentry *)workmem->slin; - slout.entries = (struct nx842_slentry *)workmem->slout; - - /* Init operation */ - op.flags = NX842_OP_DECOMPRESS; - csbcpb = &workmem->csbcpb; - memset(csbcpb, 0, sizeof(*csbcpb)); - op.csbcpb = nx842_get_pa(csbcpb); - - /* - * max_sync_size may have changed since compression, - * so we can't read it from the device info. We need - * to derive it from hdr->blocks_nr. - */ - max_sync_size = PAGE_SIZE / hdr->blocks_nr; - - for (i = 0; i < hdr->blocks_nr; i++) { - /* Skip padding */ - inbuf = ALIGN(inbuf, IO_BUFFER_ALIGN); - - if (hdr->sizes[i] < 0) { - /* Negative sizes indicate uncompressed data blocks */ - size = abs(hdr->sizes[i]); - memcpy((void *)outbuf, (void *)inbuf, size); - outbuf += size; - inbuf += size; - continue; - } - - if (!dev) - goto sw; - - /* - * The better the compression, the more likely the "likely" - * case becomes. - */ - if (likely((inbuf & NX842_HW_PAGE_MASK) == - ((inbuf + hdr->sizes[i] - 1) & NX842_HW_PAGE_MASK))) { - /* Create direct DDE */ - op.in = nx842_get_pa((void *)inbuf); - op.inlen = hdr->sizes[i]; - } else { - /* Create indirect DDE (scatterlist) */ - nx842_build_scatterlist(inbuf, hdr->sizes[i] , &slin); - op.in = nx842_get_pa(slin.entries); - op.inlen = -nx842_get_scatterlist_size(&slin); - } - - /* - * NOTE: If the default max_sync_size is changed from 4k - * to 64k, remove the "likely" case below, since a - * scatterlist will always be needed. 
- */ - if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) { - /* Create direct DDE */ - op.out = nx842_get_pa((void *)outbuf); - op.outlen = max_sync_size; - } else { - /* Create indirect DDE (scatterlist) */ - nx842_build_scatterlist(outbuf, max_sync_size, &slout); - op.out = nx842_get_pa(slout.entries); - op.outlen = -nx842_get_scatterlist_size(&slout); - } - - /* Send request to pHyp */ - ret = vio_h_cop_sync(local_devdata->vdev, &op); - - /* Check for pHyp error */ - if (ret) { - dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n", - __func__, ret, op.hcall_err); - dev = NULL; - goto sw; - } - - /* Check for hardware error */ - ret = nx842_validate_result(dev, &csbcpb->csb); - if (ret) { - dev = NULL; - goto sw; - } - - /* HW decompression success */ - inbuf += hdr->sizes[i]; - outbuf += csbcpb->csb.processed_byte_count; - continue; - -sw: - /* software decompression */ - size = max_sync_size; - ret = sw842_decompress( - (unsigned char *)inbuf, hdr->sizes[i], - (unsigned char *)outbuf, &size, wmem); - if (ret) - pr_debug("%s: sw842_decompress failed with %d\n", - __func__, ret); - - if (ret) { - if (ret != -ENOSPC && ret != -EINVAL && - ret != -EMSGSIZE) - ret = -EIO; - goto unlock; - } - - /* SW decompression success */ - inbuf += hdr->sizes[i]; - outbuf += size; - } - - *outlen = (unsigned int)(outbuf - (unsigned long)out); - -unlock: - if (ret) - /* decompress fail */ - nx842_inc_decomp_failed(local_devdata); - else { - if (!dev) - /* software decompress */ - nx842_inc_swdecomp(local_devdata); - nx842_inc_decomp_complete(local_devdata); - ibm_nx842_incr_hist(local_devdata->counters->decomp_times, - (get_tb() - start_time) / tb_ticks_per_usec); - } - - rcu_read_unlock(); - return ret; -} -EXPORT_SYMBOL_GPL(nx842_decompress); - -/** - * nx842_OF_set_defaults -- Set default (disabled) values for devdata - * - * @devdata - struct nx842_devdata to update - * - * Returns: - * 0 on success - * -ENOENT if @devdata ptr is NULL - */ -static int nx842_OF_set_defaults(struct nx842_devdata *devdata) -{ - if (devdata) { - devdata->max_sync_size = 0; - devdata->max_sync_sg = 0; - devdata->max_sg_len = 0; - devdata->status = UNAVAILABLE; - return 0; - } else - return -ENOENT; -} - -/** - * nx842_OF_upd_status -- Update the device info from OF status prop - * - * The status property indicates if the accelerator is enabled. If the - * device is in the OF tree it indicates that the hardware is present. - * The status field indicates if the device is enabled when the status - * is 'okay'. Otherwise the device driver will be disabled. - * - * @devdata - struct nx842_devdata to update - * @prop - struct property point containing the maxsyncop for the update - * - * Returns: - * 0 - Device is available - * -EINVAL - Device is not available - */ -static int nx842_OF_upd_status(struct nx842_devdata *devdata, - struct property *prop) { - int ret = 0; - const char *status = (const char *)prop->value; - - if (!strncmp(status, "okay", (size_t)prop->length)) { - devdata->status = AVAILABLE; - } else { - dev_info(devdata->dev, "%s: status '%s' is not 'okay'\n", - __func__, status); - devdata->status = UNAVAILABLE; - } - - return ret; -} - -/** - * nx842_OF_upd_maxsglen -- Update the device info from OF maxsglen prop - * - * Definition of the 'ibm,max-sg-len' OF property: - * This field indicates the maximum byte length of a scatter list - * for the platform facility. It is a single cell encoded as with encode-int. 
- * - * Example: - * # od -x ibm,max-sg-len - * 0000000 0000 0ff0 - * - * In this example, the maximum byte length of a scatter list is - * 0x0ff0 (4,080). - * - * @devdata - struct nx842_devdata to update - * @prop - struct property point containing the maxsyncop for the update - * - * Returns: - * 0 on success - * -EINVAL on failure - */ -static int nx842_OF_upd_maxsglen(struct nx842_devdata *devdata, - struct property *prop) { - int ret = 0; - const int *maxsglen = prop->value; - - if (prop->length != sizeof(*maxsglen)) { - dev_err(devdata->dev, "%s: unexpected format for ibm,max-sg-len property\n", __func__); - dev_dbg(devdata->dev, "%s: ibm,max-sg-len is %d bytes long, expected %lu bytes\n", __func__, - prop->length, sizeof(*maxsglen)); - ret = -EINVAL; - } else { - devdata->max_sg_len = (unsigned int)min(*maxsglen, - (int)NX842_HW_PAGE_SIZE); - } - - return ret; -} - -/** - * nx842_OF_upd_maxsyncop -- Update the device info from OF maxsyncop prop - * - * Definition of the 'ibm,max-sync-cop' OF property: - * Two series of cells. The first series of cells represents the maximums - * that can be synchronously compressed. The second series of cells - * represents the maximums that can be synchronously decompressed. - * 1. The first cell in each series contains the count of the number of - * data length, scatter list elements pairs that follow – each being - * of the form - * a. One cell data byte length - * b. One cell total number of scatter list elements - * - * Example: - * # od -x ibm,max-sync-cop - * 0000000 0000 0001 0000 1000 0000 01fe 0000 0001 - * 0000020 0000 1000 0000 01fe - * - * In this example, compression supports 0x1000 (4,096) data byte length - * and 0x1fe (510) total scatter list elements. Decompression supports - * 0x1000 (4,096) data byte length and 0x1f3 (510) total scatter list - * elements. - * - * @devdata - struct nx842_devdata to update - * @prop - struct property point containing the maxsyncop for the update - * - * Returns: - * 0 on success - * -EINVAL on failure - */ -static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata, - struct property *prop) { - int ret = 0; - const struct maxsynccop_t { - int comp_elements; - int comp_data_limit; - int comp_sg_limit; - int decomp_elements; - int decomp_data_limit; - int decomp_sg_limit; - } *maxsynccop; - - if (prop->length != sizeof(*maxsynccop)) { - dev_err(devdata->dev, "%s: unexpected format for ibm,max-sync-cop property\n", __func__); - dev_dbg(devdata->dev, "%s: ibm,max-sync-cop is %d bytes long, expected %lu bytes\n", __func__, prop->length, - sizeof(*maxsynccop)); - ret = -EINVAL; - goto out; - } - - maxsynccop = (const struct maxsynccop_t *)prop->value; - - /* Use one limit rather than separate limits for compression and - * decompression. 
Set a maximum for this so as not to exceed the - * size that the header can support and round the value down to - * the hardware page size (4K) */ - devdata->max_sync_size = - (unsigned int)min(maxsynccop->comp_data_limit, - maxsynccop->decomp_data_limit); - - devdata->max_sync_size = min_t(unsigned int, devdata->max_sync_size, - SIZE_64K); - - if (devdata->max_sync_size < SIZE_4K) { - dev_err(devdata->dev, "%s: hardware max data size (%u) is " - "less than the driver minimum, unable to use " - "the hardware device\n", - __func__, devdata->max_sync_size); - ret = -EINVAL; - goto out; - } - - devdata->max_sync_sg = (unsigned int)min(maxsynccop->comp_sg_limit, - maxsynccop->decomp_sg_limit); - if (devdata->max_sync_sg < 1) { - dev_err(devdata->dev, "%s: hardware max sg size (%u) is " - "less than the driver minimum, unable to use " - "the hardware device\n", - __func__, devdata->max_sync_sg); - ret = -EINVAL; - goto out; - } - -out: - return ret; -} - -/** - * - * nx842_OF_upd -- Handle OF properties updates for the device. - * - * Set all properties from the OF tree. Optionally, a new property - * can be provided by the @new_prop pointer to overwrite an existing value. - * The device will remain disabled until all values are valid, this function - * will return an error for updates unless all values are valid. - * - * @new_prop: If not NULL, this property is being updated. If NULL, update - * all properties from the current values in the OF tree. - * - * Returns: - * 0 - Success - * -ENOMEM - Could not allocate memory for new devdata structure - * -EINVAL - property value not found, new_prop is not a recognized - * property for the device or property value is not valid. - * -ENODEV - Device is not available - */ -static int nx842_OF_upd(struct property *new_prop) -{ - struct nx842_devdata *old_devdata = NULL; - struct nx842_devdata *new_devdata = NULL; - struct device_node *of_node = NULL; - struct property *status = NULL; - struct property *maxsglen = NULL; - struct property *maxsyncop = NULL; - int ret = 0; - unsigned long flags; - - spin_lock_irqsave(&devdata_mutex, flags); - old_devdata = rcu_dereference_check(devdata, - lockdep_is_held(&devdata_mutex)); - if (old_devdata) - of_node = old_devdata->dev->of_node; - - if (!old_devdata || !of_node) { - pr_err("%s: device is not available\n", __func__); - spin_unlock_irqrestore(&devdata_mutex, flags); - return -ENODEV; - } - - new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS); - if (!new_devdata) { - dev_err(old_devdata->dev, "%s: Could not allocate memory for device data\n", __func__); - ret = -ENOMEM; - goto error_out; - } - - memcpy(new_devdata, old_devdata, sizeof(*old_devdata)); - new_devdata->counters = old_devdata->counters; - - /* Set ptrs for existing properties */ - status = of_find_property(of_node, "status", NULL); - maxsglen = of_find_property(of_node, "ibm,max-sg-len", NULL); - maxsyncop = of_find_property(of_node, "ibm,max-sync-cop", NULL); - if (!status || !maxsglen || !maxsyncop) { - dev_err(old_devdata->dev, "%s: Could not locate device properties\n", __func__); - ret = -EINVAL; - goto error_out; - } - - /* - * If this is a property update, there are only certain properties that - * we care about. 
Bail if it isn't in the below list - */ - if (new_prop && (strncmp(new_prop->name, "status", new_prop->length) || - strncmp(new_prop->name, "ibm,max-sg-len", new_prop->length) || - strncmp(new_prop->name, "ibm,max-sync-cop", new_prop->length))) - goto out; - - /* Perform property updates */ - ret = nx842_OF_upd_status(new_devdata, status); - if (ret) - goto error_out; - - ret = nx842_OF_upd_maxsglen(new_devdata, maxsglen); - if (ret) - goto error_out; - - ret = nx842_OF_upd_maxsyncop(new_devdata, maxsyncop); - if (ret) - goto error_out; - -out: - dev_info(old_devdata->dev, "%s: max_sync_size new:%u old:%u\n", - __func__, new_devdata->max_sync_size, - old_devdata->max_sync_size); - dev_info(old_devdata->dev, "%s: max_sync_sg new:%u old:%u\n", - __func__, new_devdata->max_sync_sg, - old_devdata->max_sync_sg); - dev_info(old_devdata->dev, "%s: max_sg_len new:%u old:%u\n", - __func__, new_devdata->max_sg_len, - old_devdata->max_sg_len); - - rcu_assign_pointer(devdata, new_devdata); - spin_unlock_irqrestore(&devdata_mutex, flags); - synchronize_rcu(); - dev_set_drvdata(new_devdata->dev, new_devdata); - kfree(old_devdata); - return 0; - -error_out: - if (new_devdata) { - dev_info(old_devdata->dev, "%s: device disabled\n", __func__); - nx842_OF_set_defaults(new_devdata); - rcu_assign_pointer(devdata, new_devdata); - spin_unlock_irqrestore(&devdata_mutex, flags); - synchronize_rcu(); - dev_set_drvdata(new_devdata->dev, new_devdata); - kfree(old_devdata); - } else { - dev_err(old_devdata->dev, "%s: could not update driver from hardware\n", __func__); - spin_unlock_irqrestore(&devdata_mutex, flags); - } - - if (!ret) - ret = -EINVAL; - return ret; -} - -/** - * nx842_OF_notifier - Process updates to OF properties for the device - * - * @np: notifier block - * @action: notifier action - * @update: struct pSeries_reconfig_prop_update pointer if action is - * PSERIES_UPDATE_PROPERTY - * - * Returns: - * NOTIFY_OK on success - * NOTIFY_BAD encoded with error number on failure, use - * notifier_to_errno() to decode this value - */ -static int nx842_OF_notifier(struct notifier_block *np, unsigned long action, - void *data) -{ - struct of_reconfig_data *upd = data; - struct nx842_devdata *local_devdata; - struct device_node *node = NULL; - - rcu_read_lock(); - local_devdata = rcu_dereference(devdata); - if (local_devdata) - node = local_devdata->dev->of_node; - - if (local_devdata && - action == OF_RECONFIG_UPDATE_PROPERTY && - !strcmp(upd->dn->name, node->name)) { - rcu_read_unlock(); - nx842_OF_upd(upd->prop); - } else - rcu_read_unlock(); - - return NOTIFY_OK; -} - -static struct notifier_block nx842_of_nb = { - .notifier_call = nx842_OF_notifier, -}; - -#define nx842_counter_read(_name) \ -static ssize_t nx842_##_name##_show(struct device *dev, \ - struct device_attribute *attr, \ - char *buf) { \ - struct nx842_devdata *local_devdata; \ - int p = 0; \ - rcu_read_lock(); \ - local_devdata = rcu_dereference(devdata); \ - if (local_devdata) \ - p = snprintf(buf, PAGE_SIZE, "%ld\n", \ - atomic64_read(&local_devdata->counters->_name)); \ - rcu_read_unlock(); \ - return p; \ -} - -#define NX842DEV_COUNTER_ATTR_RO(_name) \ - nx842_counter_read(_name); \ - static struct device_attribute dev_attr_##_name = __ATTR(_name, \ - 0444, \ - nx842_##_name##_show,\ - NULL); - -NX842DEV_COUNTER_ATTR_RO(comp_complete); -NX842DEV_COUNTER_ATTR_RO(comp_failed); -NX842DEV_COUNTER_ATTR_RO(decomp_complete); -NX842DEV_COUNTER_ATTR_RO(decomp_failed); -NX842DEV_COUNTER_ATTR_RO(swdecomp); - -static ssize_t 
nx842_timehist_show(struct device *, - struct device_attribute *, char *); - -static struct device_attribute dev_attr_comp_times = __ATTR(comp_times, 0444, - nx842_timehist_show, NULL); -static struct device_attribute dev_attr_decomp_times = __ATTR(decomp_times, - 0444, nx842_timehist_show, NULL); - -static ssize_t nx842_timehist_show(struct device *dev, - struct device_attribute *attr, char *buf) { - char *p = buf; - struct nx842_devdata *local_devdata; - atomic64_t *times; - int bytes_remain = PAGE_SIZE; - int bytes; - int i; - - rcu_read_lock(); - local_devdata = rcu_dereference(devdata); - if (!local_devdata) { - rcu_read_unlock(); - return 0; - } - - if (attr == &dev_attr_comp_times) - times = local_devdata->counters->comp_times; - else if (attr == &dev_attr_decomp_times) - times = local_devdata->counters->decomp_times; - else { - rcu_read_unlock(); - return 0; - } - - for (i = 0; i < (NX842_HIST_SLOTS - 2); i++) { - bytes = snprintf(p, bytes_remain, "%u-%uus:\t%ld\n", - i ? (2<<(i-1)) : 0, (2<vdev != NULL) { - dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__); - ret = -1; - goto error_unlock; - } - - dev_set_drvdata(&viodev->dev, NULL); - - new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS); - if (!new_devdata) { - dev_err(&viodev->dev, "%s: Could not allocate memory for device data\n", __func__); - ret = -ENOMEM; - goto error_unlock; - } - - new_devdata->counters = kzalloc(sizeof(*new_devdata->counters), - GFP_NOFS); - if (!new_devdata->counters) { - dev_err(&viodev->dev, "%s: Could not allocate memory for performance counters\n", __func__); - ret = -ENOMEM; - goto error_unlock; - } - - new_devdata->vdev = viodev; - new_devdata->dev = &viodev->dev; - nx842_OF_set_defaults(new_devdata); - - rcu_assign_pointer(devdata, new_devdata); - spin_unlock_irqrestore(&devdata_mutex, flags); - synchronize_rcu(); - kfree(old_devdata); - - of_reconfig_notifier_register(&nx842_of_nb); - - ret = nx842_OF_upd(NULL); - if (ret && ret != -ENODEV) { - dev_err(&viodev->dev, "could not parse device tree. 
%d\n", ret); - ret = -1; - goto error; - } - - rcu_read_lock(); - dev_set_drvdata(&viodev->dev, rcu_dereference(devdata)); - rcu_read_unlock(); - - if (sysfs_create_group(&viodev->dev.kobj, &nx842_attribute_group)) { - dev_err(&viodev->dev, "could not create sysfs device attributes\n"); - ret = -1; - goto error; - } - - return 0; - -error_unlock: - spin_unlock_irqrestore(&devdata_mutex, flags); - if (new_devdata) - kfree(new_devdata->counters); - kfree(new_devdata); -error: - return ret; -} - -static int __exit nx842_remove(struct vio_dev *viodev) -{ - struct nx842_devdata *old_devdata; - unsigned long flags; - - pr_info("Removing IBM Power 842 compression device\n"); - sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group); - - spin_lock_irqsave(&devdata_mutex, flags); - old_devdata = rcu_dereference_check(devdata, - lockdep_is_held(&devdata_mutex)); - of_reconfig_notifier_unregister(&nx842_of_nb); - RCU_INIT_POINTER(devdata, NULL); - spin_unlock_irqrestore(&devdata_mutex, flags); - synchronize_rcu(); - dev_set_drvdata(&viodev->dev, NULL); - if (old_devdata) - kfree(old_devdata->counters); - kfree(old_devdata); - return 0; -} - -static struct vio_device_id nx842_driver_ids[] = { - {"ibm,compression-v1", "ibm,compression"}, - {"", ""}, -}; - -static struct vio_driver nx842_driver = { - .name = MODULE_NAME, - .probe = nx842_probe, - .remove = __exit_p(nx842_remove), - .get_desired_dma = nx842_get_desired_dma, - .id_table = nx842_driver_ids, -}; - -static int __init nx842_init(void) -{ - struct nx842_devdata *new_devdata; - pr_info("Registering IBM Power 842 compression driver\n"); - - RCU_INIT_POINTER(devdata, NULL); - new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL); - if (!new_devdata) { - pr_err("Could not allocate memory for device data\n"); - return -ENOMEM; - } - new_devdata->status = UNAVAILABLE; - RCU_INIT_POINTER(devdata, new_devdata); - - return vio_register_driver(&nx842_driver); -} - -module_init(nx842_init); - -static void __exit nx842_exit(void) -{ - struct nx842_devdata *old_devdata; - unsigned long flags; - - pr_info("Exiting IBM Power 842 compression driver\n"); - spin_lock_irqsave(&devdata_mutex, flags); - old_devdata = rcu_dereference_check(devdata, - lockdep_is_held(&devdata_mutex)); - RCU_INIT_POINTER(devdata, NULL); - spin_unlock_irqrestore(&devdata_mutex, flags); - synchronize_rcu(); - if (old_devdata) - dev_set_drvdata(old_devdata->dev, NULL); - kfree(old_devdata); - vio_unregister_driver(&nx842_driver); -} - -module_exit(nx842_exit); - -/********************************* - * 842 software decompressor -*********************************/ -typedef int (*sw842_template_op)(const char **, int *, unsigned char **, - struct sw842_fifo *); - -static int sw842_data8(const char **, int *, unsigned char **, - struct sw842_fifo *); -static int sw842_data4(const char **, int *, unsigned char **, - struct sw842_fifo *); -static int sw842_data2(const char **, int *, unsigned char **, - struct sw842_fifo *); -static int sw842_ptr8(const char **, int *, unsigned char **, - struct sw842_fifo *); -static int sw842_ptr4(const char **, int *, unsigned char **, - struct sw842_fifo *); -static int sw842_ptr2(const char **, int *, unsigned char **, - struct sw842_fifo *); - -/* special templates */ -#define SW842_TMPL_REPEAT 0x1B -#define SW842_TMPL_ZEROS 0x1C -#define SW842_TMPL_EOF 0x1E - -static sw842_template_op sw842_tmpl_ops[26][4] = { - { sw842_data8, NULL}, /* 0 (00000) */ - { sw842_data4, sw842_data2, sw842_ptr2, NULL}, - { sw842_data4, sw842_ptr2, sw842_data2, 
NULL}, - { sw842_data4, sw842_ptr2, sw842_ptr2, NULL}, - { sw842_data4, sw842_ptr4, NULL}, - { sw842_data2, sw842_ptr2, sw842_data4, NULL}, - { sw842_data2, sw842_ptr2, sw842_data2, sw842_ptr2}, - { sw842_data2, sw842_ptr2, sw842_ptr2, sw842_data2}, - { sw842_data2, sw842_ptr2, sw842_ptr2, sw842_ptr2,}, - { sw842_data2, sw842_ptr2, sw842_ptr4, NULL}, - { sw842_ptr2, sw842_data2, sw842_data4, NULL}, /* 10 (01010) */ - { sw842_ptr2, sw842_data4, sw842_ptr2, NULL}, - { sw842_ptr2, sw842_data2, sw842_ptr2, sw842_data2}, - { sw842_ptr2, sw842_data2, sw842_ptr2, sw842_ptr2}, - { sw842_ptr2, sw842_data2, sw842_ptr4, NULL}, - { sw842_ptr2, sw842_ptr2, sw842_data4, NULL}, - { sw842_ptr2, sw842_ptr2, sw842_data2, sw842_ptr2}, - { sw842_ptr2, sw842_ptr2, sw842_ptr2, sw842_data2}, - { sw842_ptr2, sw842_ptr2, sw842_ptr2, sw842_ptr2}, - { sw842_ptr2, sw842_ptr2, sw842_ptr4, NULL}, - { sw842_ptr4, sw842_data4, NULL}, /* 20 (10100) */ - { sw842_ptr4, sw842_data2, sw842_ptr2, NULL}, - { sw842_ptr4, sw842_ptr2, sw842_data2, NULL}, - { sw842_ptr4, sw842_ptr2, sw842_ptr2, NULL}, - { sw842_ptr4, sw842_ptr4, NULL}, - { sw842_ptr8, NULL} -}; - -/* Software decompress helpers */ - -static uint8_t sw842_get_byte(const char *buf, int bit) -{ - uint8_t tmpl; - uint16_t tmp; - tmp = htons(*(uint16_t *)(buf)); - tmp = (uint16_t)(tmp << bit); - tmp = ntohs(tmp); - memcpy(&tmpl, &tmp, 1); - return tmpl; -} - -static uint8_t sw842_get_template(const char **buf, int *bit) -{ - uint8_t byte; - byte = sw842_get_byte(*buf, *bit); - byte = byte >> 3; - byte &= 0x1F; - *buf += (*bit + 5) / 8; - *bit = (*bit + 5) % 8; - return byte; -} - -/* repeat_count happens to be 5-bit too (like the template) */ -static uint8_t sw842_get_repeat_count(const char **buf, int *bit) -{ - uint8_t byte; - byte = sw842_get_byte(*buf, *bit); - byte = byte >> 2; - byte &= 0x3F; - *buf += (*bit + 6) / 8; - *bit = (*bit + 6) % 8; - return byte; -} - -static uint8_t sw842_get_ptr2(const char **buf, int *bit) -{ - uint8_t ptr; - ptr = sw842_get_byte(*buf, *bit); - (*buf)++; - return ptr; -} - -static uint16_t sw842_get_ptr4(const char **buf, int *bit, - struct sw842_fifo *fifo) -{ - uint16_t ptr; - ptr = htons(*(uint16_t *)(*buf)); - ptr = (uint16_t)(ptr << *bit); - ptr = ptr >> 7; - ptr &= 0x01FF; - *buf += (*bit + 9) / 8; - *bit = (*bit + 9) % 8; - return ptr; -} - -static uint8_t sw842_get_ptr8(const char **buf, int *bit, - struct sw842_fifo *fifo) -{ - return sw842_get_ptr2(buf, bit); -} - -/* Software decompress template ops */ - -static int sw842_data8(const char **inbuf, int *inbit, - unsigned char **outbuf, struct sw842_fifo *fifo) -{ - int ret; - - ret = sw842_data4(inbuf, inbit, outbuf, fifo); - if (ret) - return ret; - ret = sw842_data4(inbuf, inbit, outbuf, fifo); - return ret; -} - -static int sw842_data4(const char **inbuf, int *inbit, - unsigned char **outbuf, struct sw842_fifo *fifo) -{ - int ret; - - ret = sw842_data2(inbuf, inbit, outbuf, fifo); - if (ret) - return ret; - ret = sw842_data2(inbuf, inbit, outbuf, fifo); - return ret; -} - -static int sw842_data2(const char **inbuf, int *inbit, - unsigned char **outbuf, struct sw842_fifo *fifo) -{ - **outbuf = sw842_get_byte(*inbuf, *inbit); - (*inbuf)++; - (*outbuf)++; - **outbuf = sw842_get_byte(*inbuf, *inbit); - (*inbuf)++; - (*outbuf)++; - return 0; -} - -static int sw842_ptr8(const char **inbuf, int *inbit, - unsigned char **outbuf, struct sw842_fifo *fifo) -{ - uint8_t ptr; - ptr = sw842_get_ptr8(inbuf, inbit, fifo); - if (!fifo->f84_full && (ptr >= fifo->f8_count)) - return 1; - 
memcpy(*outbuf, fifo->f8[ptr], 8); - *outbuf += 8; - return 0; -} - -static int sw842_ptr4(const char **inbuf, int *inbit, - unsigned char **outbuf, struct sw842_fifo *fifo) -{ - uint16_t ptr; - ptr = sw842_get_ptr4(inbuf, inbit, fifo); - if (!fifo->f84_full && (ptr >= fifo->f4_count)) - return 1; - memcpy(*outbuf, fifo->f4[ptr], 4); - *outbuf += 4; - return 0; -} - -static int sw842_ptr2(const char **inbuf, int *inbit, - unsigned char **outbuf, struct sw842_fifo *fifo) -{ - uint8_t ptr; - ptr = sw842_get_ptr2(inbuf, inbit); - if (!fifo->f2_full && (ptr >= fifo->f2_count)) - return 1; - memcpy(*outbuf, fifo->f2[ptr], 2); - *outbuf += 2; - return 0; -} - -static void sw842_copy_to_fifo(const char *buf, struct sw842_fifo *fifo) -{ - unsigned char initial_f2count = fifo->f2_count; - - memcpy(fifo->f8[fifo->f8_count], buf, 8); - fifo->f4_count += 2; - fifo->f8_count += 1; - - if (!fifo->f84_full && fifo->f4_count >= 512) { - fifo->f84_full = 1; - fifo->f4_count /= 512; - } - - memcpy(fifo->f2[fifo->f2_count++], buf, 2); - memcpy(fifo->f2[fifo->f2_count++], buf + 2, 2); - memcpy(fifo->f2[fifo->f2_count++], buf + 4, 2); - memcpy(fifo->f2[fifo->f2_count++], buf + 6, 2); - if (fifo->f2_count < initial_f2count) - fifo->f2_full = 1; -} - -static int sw842_decompress(const unsigned char *src, int srclen, - unsigned char *dst, int *destlen, - const void *wrkmem) -{ - uint8_t tmpl; - const char *inbuf; - int inbit = 0; - unsigned char *outbuf, *outbuf_end, *origbuf, *prevbuf; - const char *inbuf_end; - sw842_template_op op; - int opindex; - int i, repeat_count; - struct sw842_fifo *fifo; - int ret = 0; - - fifo = &((struct nx842_workmem *)(wrkmem))->swfifo; - memset(fifo, 0, sizeof(*fifo)); - - origbuf = NULL; - inbuf = src; - inbuf_end = src + srclen; - outbuf = dst; - outbuf_end = dst + *destlen; - - while ((tmpl = sw842_get_template(&inbuf, &inbit)) != SW842_TMPL_EOF) { - if (inbuf >= inbuf_end) { - ret = -EINVAL; - goto out; - } - - opindex = 0; - prevbuf = origbuf; - origbuf = outbuf; - switch (tmpl) { - case SW842_TMPL_REPEAT: - if (prevbuf == NULL) { - ret = -EINVAL; - goto out; - } - - repeat_count = sw842_get_repeat_count(&inbuf, - &inbit) + 1; - - /* Did the repeat count advance past the end of input */ - if (inbuf > inbuf_end) { - ret = -EINVAL; - goto out; - } - - for (i = 0; i < repeat_count; i++) { - /* Would this overflow the output buffer */ - if ((outbuf + 8) > outbuf_end) { - ret = -ENOSPC; - goto out; - } - - memcpy(outbuf, prevbuf, 8); - sw842_copy_to_fifo(outbuf, fifo); - outbuf += 8; - } - break; - - case SW842_TMPL_ZEROS: - /* Would this overflow the output buffer */ - if ((outbuf + 8) > outbuf_end) { - ret = -ENOSPC; - goto out; - } - - memset(outbuf, 0, 8); - sw842_copy_to_fifo(outbuf, fifo); - outbuf += 8; - break; - - default: - if (tmpl > 25) { - ret = -EINVAL; - goto out; - } - - /* Does this go past the end of the input buffer */ - if ((inbuf + 2) > inbuf_end) { - ret = -EINVAL; - goto out; - } - - /* Would this overflow the output buffer */ - if ((outbuf + 8) > outbuf_end) { - ret = -ENOSPC; - goto out; - } - - while (opindex < 4 && - (op = sw842_tmpl_ops[tmpl][opindex++]) - != NULL) { - ret = (*op)(&inbuf, &inbit, &outbuf, fifo); - if (ret) { - ret = -EINVAL; - goto out; - } - sw842_copy_to_fifo(origbuf, fifo); - } - } - } - -out: - if (!ret) - *destlen = (unsigned int)(outbuf - dst); - else - *destlen = 0; - - return ret; -} -- cgit v1.2.3 From 7011a122383e36dab594406720fa1d089e0be8f9 Mon Sep 17 00:00:00 2001 From: Dan Streetman Date: Thu, 7 May 2015 13:49:17 -0400 
Subject: crypto: nx - add NX-842 platform frontend driver Add NX-842 frontend that allows using either the pSeries platform or PowerNV platform driver (to be added by later patch) for the NX-842 hardware. Update the MAINTAINERS file to include the new filenames. Update Kconfig files to clarify titles and descriptions, and correct dependencies. Signed-off-by: Dan Streetman Signed-off-by: Herbert Xu --- MAINTAINERS | 2 +- drivers/crypto/Kconfig | 10 +-- drivers/crypto/nx/Kconfig | 35 ++++++--- drivers/crypto/nx/Makefile | 4 +- drivers/crypto/nx/nx-842-pseries.c | 57 +++++++-------- drivers/crypto/nx/nx-842.c | 144 +++++++++++++++++++++++++++++++++++++ drivers/crypto/nx/nx-842.h | 32 +++++++++ include/linux/nx842.h | 10 +-- 8 files changed, 245 insertions(+), 49 deletions(-) create mode 100644 drivers/crypto/nx/nx-842.c create mode 100644 drivers/crypto/nx/nx-842.h (limited to 'drivers') diff --git a/MAINTAINERS b/MAINTAINERS index a13b4b10ff46..912c9d9f741c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4861,7 +4861,7 @@ F: drivers/crypto/nx/ IBM Power 842 compression accelerator M: Dan Streetman S: Supported -F: drivers/crypto/nx/nx-842.c +F: drivers/crypto/nx/nx-842* F: include/linux/nx842.h F: include/linux/sw842.h F: crypto/842.c diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 8a76a012be61..0889e496d886 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -330,11 +330,13 @@ config CRYPTO_DEV_S5P algorithms execution. config CRYPTO_DEV_NX - bool "Support for IBM Power7+ in-Nest cryptographic acceleration" - depends on PPC64 && IBMVIO && !CPU_LITTLE_ENDIAN - default n + bool "Support for IBM PowerPC Nest (NX) cryptographic acceleration" + depends on PPC64 help - Support for Power7+ in-Nest cryptographic acceleration. + This enables support for the NX hardware cryptographic accelerator + coprocessor that is in IBM PowerPC P7+ or later processors. This + does not actually enable any drivers, it only allows you to select + which acceleration type (encryption and/or compression) to enable. if CRYPTO_DEV_NX source "drivers/crypto/nx/Kconfig" diff --git a/drivers/crypto/nx/Kconfig b/drivers/crypto/nx/Kconfig index f82616621ae1..34013f7d46c8 100644 --- a/drivers/crypto/nx/Kconfig +++ b/drivers/crypto/nx/Kconfig @@ -1,7 +1,9 @@ + config CRYPTO_DEV_NX_ENCRYPT - tristate "Encryption acceleration support" - depends on PPC64 && IBMVIO + tristate "Encryption acceleration support on pSeries platform" + depends on PPC_PSERIES && IBMVIO && !CPU_LITTLE_ENDIAN default y + select CRYPTO_ALGAPI select CRYPTO_AES select CRYPTO_CBC select CRYPTO_ECB @@ -12,15 +14,30 @@ config CRYPTO_DEV_NX_ENCRYPT select CRYPTO_SHA256 select CRYPTO_SHA512 help - Support for Power7+ in-Nest encryption acceleration. This - module supports acceleration for AES and SHA2 algorithms. If you - choose 'M' here, this module will be called nx_crypto. + Support for PowerPC Nest (NX) encryption acceleration. This + module supports acceleration for AES and SHA2 algorithms on + the pSeries platform. If you choose 'M' here, this module + will be called nx_crypto. config CRYPTO_DEV_NX_COMPRESS tristate "Compression acceleration support" - depends on PPC64 && IBMVIO default y help - Support for Power7+ in-Nest compression acceleration. This - module supports acceleration for AES and SHA2 algorithms. If you - choose 'M' here, this module will be called nx_compress. + Support for PowerPC Nest (NX) compression acceleration. This + module supports acceleration for compressing memory with the 842 + algorithm. 
One of the platform drivers must be selected also. + If you choose 'M' here, this module will be called nx_compress. + +if CRYPTO_DEV_NX_COMPRESS + +config CRYPTO_DEV_NX_COMPRESS_PSERIES + tristate "Compression acceleration support on pSeries platform" + depends on PPC_PSERIES && IBMVIO && !CPU_LITTLE_ENDIAN + default y + help + Support for PowerPC Nest (NX) compression acceleration. This + module supports acceleration for compressing memory with the 842 + algorithm. This supports NX hardware on the pSeries platform. + If you choose 'M' here, this module will be called nx_compress_pseries. + +endif diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile index 8669ffa8dc5c..5d9f4bc15209 100644 --- a/drivers/crypto/nx/Makefile +++ b/drivers/crypto/nx/Makefile @@ -11,4 +11,6 @@ nx-crypto-objs := nx.o \ nx-sha512.o obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS) += nx-compress.o -nx-compress-objs := nx-842-pseries.o +obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o +nx-compress-objs := nx-842.o +nx-compress-pseries-objs := nx-842-pseries.o diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c index 887196e9b50c..9b83c9e7fd73 100644 --- a/drivers/crypto/nx/nx-842-pseries.c +++ b/drivers/crypto/nx/nx-842-pseries.c @@ -21,18 +21,13 @@ * Seth Jennings */ -#include -#include -#include -#include -#include - #include #include +#include "nx-842.h" #include "nx_csbcpb.h" /* struct nx_csbcpb */ -#define MODULE_NAME "nx-compress" +#define MODULE_NAME NX842_PSERIES_MODULE_NAME MODULE_LICENSE("GPL"); MODULE_AUTHOR("Robert Jennings "); MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors"); @@ -236,18 +231,6 @@ struct nx842_workmem { }; }; -int nx842_get_workmem_size(void) -{ - return sizeof(struct nx842_workmem) + NX842_HW_PAGE_SIZE; -} -EXPORT_SYMBOL_GPL(nx842_get_workmem_size); - -int nx842_get_workmem_size_aligned(void) -{ - return sizeof(struct nx842_workmem); -} -EXPORT_SYMBOL_GPL(nx842_get_workmem_size_aligned); - static int nx842_validate_result(struct device *dev, struct cop_status_block *csb) { @@ -300,7 +283,7 @@ static int nx842_validate_result(struct device *dev, } /** - * nx842_compress - Compress data using the 842 algorithm + * nx842_pseries_compress - Compress data using the 842 algorithm * * Compression provide by the NX842 coprocessor on IBM Power systems. 
* The input buffer is compressed and the result is stored in the @@ -315,7 +298,7 @@ static int nx842_validate_result(struct device *dev, * @out: Pointer to output buffer * @outlen: Length of output buffer * @wrkmem: ptr to buffer for working memory, size determined by - * nx842_get_workmem_size() + * NX842_MEM_COMPRESS * * Returns: * 0 Success, output of length @outlen stored in the buffer at @out @@ -325,8 +308,9 @@ static int nx842_validate_result(struct device *dev, * -EIO Internal error * -ENODEV Hardware unavailable */ -int nx842_compress(const unsigned char *in, unsigned int inlen, - unsigned char *out, unsigned int *outlen, void *wmem) +static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen, + unsigned char *out, unsigned int *outlen, + void *wmem) { struct nx842_header *hdr; struct nx842_devdata *local_devdata; @@ -493,13 +477,12 @@ unlock: rcu_read_unlock(); return ret; } -EXPORT_SYMBOL_GPL(nx842_compress); static int sw842_decompress(const unsigned char *, int, unsigned char *, int *, const void *); /** - * nx842_decompress - Decompress data using the 842 algorithm + * nx842_pseries_decompress - Decompress data using the 842 algorithm * * Decompression provide by the NX842 coprocessor on IBM Power systems. * The input buffer is decompressed and the result is stored in the @@ -515,7 +498,7 @@ static int sw842_decompress(const unsigned char *, int, unsigned char *, int *, * @out: Pointer to output buffer, must be page aligned * @outlen: Length of output buffer, must be PAGE_SIZE * @wrkmem: ptr to buffer for working memory, size determined by - * nx842_get_workmem_size() + * NX842_MEM_COMPRESS * * Returns: * 0 Success, output of length @outlen stored in the buffer at @out @@ -525,8 +508,9 @@ static int sw842_decompress(const unsigned char *, int, unsigned char *, int *, * -EINVAL Bad input data encountered when attempting decompress * -EIO Internal error */ -int nx842_decompress(const unsigned char *in, unsigned int inlen, - unsigned char *out, unsigned int *outlen, void *wmem) +static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen, + unsigned char *out, unsigned int *outlen, + void *wmem) { struct nx842_header *hdr; struct nx842_devdata *local_devdata; @@ -694,7 +678,6 @@ unlock: rcu_read_unlock(); return ret; } -EXPORT_SYMBOL_GPL(nx842_decompress); /** * nx842_OF_set_defaults -- Set default (disabled) values for devdata @@ -1130,6 +1113,12 @@ static struct attribute_group nx842_attribute_group = { .attrs = nx842_sysfs_entries, }; +static struct nx842_driver nx842_pseries_driver = { + .owner = THIS_MODULE, + .compress = nx842_pseries_compress, + .decompress = nx842_pseries_decompress, +}; + static int __init nx842_probe(struct vio_dev *viodev, const struct vio_device_id *id) { @@ -1192,6 +1181,8 @@ static int __init nx842_probe(struct vio_dev *viodev, goto error; } + nx842_register_driver(&nx842_pseries_driver); + return 0; error_unlock: @@ -1222,11 +1213,14 @@ static int __exit nx842_remove(struct vio_dev *viodev) if (old_devdata) kfree(old_devdata->counters); kfree(old_devdata); + + nx842_unregister_driver(&nx842_pseries_driver); + return 0; } static struct vio_device_id nx842_driver_ids[] = { - {"ibm,compression-v1", "ibm,compression"}, + {NX842_PSERIES_COMPAT_NAME "-v1", NX842_PSERIES_COMPAT_NAME}, {"", ""}, }; @@ -1243,6 +1237,8 @@ static int __init nx842_init(void) struct nx842_devdata *new_devdata; pr_info("Registering IBM Power 842 compression driver\n"); + BUILD_BUG_ON(sizeof(struct nx842_workmem) > NX842_MEM_COMPRESS); + 
RCU_INIT_POINTER(devdata, NULL); new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL); if (!new_devdata) { @@ -1272,6 +1268,7 @@ static void __exit nx842_exit(void) if (old_devdata) dev_set_drvdata(old_devdata->dev, NULL); kfree(old_devdata); + nx842_unregister_driver(&nx842_pseries_driver); vio_unregister_driver(&nx842_driver); } diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c new file mode 100644 index 000000000000..f1f378eef373 --- /dev/null +++ b/drivers/crypto/nx/nx-842.c @@ -0,0 +1,144 @@ +/* + * Driver frontend for IBM Power 842 compression accelerator + * + * Copyright (C) 2015 Dan Streetman, IBM Corp + * + * Designer of the Power data compression engine: + * Bulent Abali + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "nx-842.h" + +#define MODULE_NAME "nx-compress" +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Dan Streetman "); +MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors"); + +/* Only one driver is expected, based on the HW platform */ +static struct nx842_driver *nx842_driver; +static DEFINE_SPINLOCK(nx842_driver_lock); /* protects driver pointers */ + +void nx842_register_driver(struct nx842_driver *driver) +{ + spin_lock(&nx842_driver_lock); + + if (nx842_driver) { + pr_err("can't register driver %s, already using driver %s\n", + driver->owner->name, nx842_driver->owner->name); + } else { + pr_info("registering driver %s\n", driver->owner->name); + nx842_driver = driver; + } + + spin_unlock(&nx842_driver_lock); +} +EXPORT_SYMBOL_GPL(nx842_register_driver); + +void nx842_unregister_driver(struct nx842_driver *driver) +{ + spin_lock(&nx842_driver_lock); + + if (nx842_driver == driver) { + pr_info("unregistering driver %s\n", driver->owner->name); + nx842_driver = NULL; + } else if (nx842_driver) { + pr_err("can't unregister driver %s, using driver %s\n", + driver->owner->name, nx842_driver->owner->name); + } else { + pr_err("can't unregister driver %s, no driver in use\n", + driver->owner->name); + } + + spin_unlock(&nx842_driver_lock); +} +EXPORT_SYMBOL_GPL(nx842_unregister_driver); + +static struct nx842_driver *get_driver(void) +{ + struct nx842_driver *driver = NULL; + + spin_lock(&nx842_driver_lock); + + driver = nx842_driver; + + if (driver && !try_module_get(driver->owner)) + driver = NULL; + + spin_unlock(&nx842_driver_lock); + + return driver; +} + +static void put_driver(struct nx842_driver *driver) +{ + module_put(driver->owner); +} + +int nx842_compress(const unsigned char *in, unsigned int in_len, + unsigned char *out, unsigned int *out_len, + void *wrkmem) +{ + struct nx842_driver *driver = get_driver(); + int ret; + + if (!driver) + return -ENODEV; + + ret = driver->compress(in, in_len, out, out_len, wrkmem); + + put_driver(driver); + + return ret; +} +EXPORT_SYMBOL_GPL(nx842_compress); + +int nx842_decompress(const unsigned char *in, unsigned int in_len, + unsigned char *out, unsigned int *out_len, + void *wrkmem) +{ + struct nx842_driver *driver = get_driver(); + int ret; + + if (!driver) + return 
-ENODEV; + + ret = driver->decompress(in, in_len, out, out_len, wrkmem); + + put_driver(driver); + + return ret; +} +EXPORT_SYMBOL_GPL(nx842_decompress); + +static __init int nx842_init(void) +{ + pr_info("loading\n"); + + if (of_find_compatible_node(NULL, NULL, NX842_PSERIES_COMPAT_NAME)) + request_module_nowait(NX842_PSERIES_MODULE_NAME); + else + pr_err("no nx842 driver found.\n"); + + pr_info("loaded\n"); + + return 0; +} +module_init(nx842_init); + +static void __exit nx842_exit(void) +{ + pr_info("NX842 unloaded\n"); +} +module_exit(nx842_exit); diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h new file mode 100644 index 000000000000..2a5d4e197c72 --- /dev/null +++ b/drivers/crypto/nx/nx-842.h @@ -0,0 +1,32 @@ + +#ifndef __NX_842_H__ +#define __NX_842_H__ + +#include +#include +#include +#include +#include +#include + +struct nx842_driver { + struct module *owner; + + int (*compress)(const unsigned char *in, unsigned int in_len, + unsigned char *out, unsigned int *out_len, + void *wrkmem); + int (*decompress)(const unsigned char *in, unsigned int in_len, + unsigned char *out, unsigned int *out_len, + void *wrkmem); +}; + +void nx842_register_driver(struct nx842_driver *driver); +void nx842_unregister_driver(struct nx842_driver *driver); + + +/* To allow the main nx-compress module to load platform module */ +#define NX842_PSERIES_MODULE_NAME "nx-compress-pseries" +#define NX842_PSERIES_COMPAT_NAME "ibm,compression" + + +#endif /* __NX_842_H__ */ diff --git a/include/linux/nx842.h b/include/linux/nx842.h index a4d324c6406a..d919c22b7fd6 100644 --- a/include/linux/nx842.h +++ b/include/linux/nx842.h @@ -1,11 +1,13 @@ #ifndef __NX842_H__ #define __NX842_H__ -int nx842_get_workmem_size(void); -int nx842_get_workmem_size_aligned(void); +#define __NX842_PSERIES_MEM_COMPRESS ((PAGE_SIZE * 2) + 10240) + +#define NX842_MEM_COMPRESS __NX842_PSERIES_MEM_COMPRESS + int nx842_compress(const unsigned char *in, unsigned int in_len, - unsigned char *out, unsigned int *out_len, void *wrkmem); + unsigned char *out, unsigned int *out_len, void *wrkmem); int nx842_decompress(const unsigned char *in, unsigned int in_len, - unsigned char *out, unsigned int *out_len, void *wrkmem); + unsigned char *out, unsigned int *out_len, void *wrkmem); #endif -- cgit v1.2.3 From 959e6659b6f74ec1fa4d391a3b88d63dc0189f36 Mon Sep 17 00:00:00 2001 From: Dan Streetman Date: Thu, 7 May 2015 13:49:18 -0400 Subject: crypto: nx - add nx842 constraints Add "constraints" for the NX-842 driver. The constraints are used to indicate what the current NX-842 platform driver is capable of. The constraints tell the NX-842 user what alignment, min and max length, and length multiple each provided buffers should conform to. These are required because the 842 hardware requires buffers to meet specific constraints that vary based on platform - for example, the pSeries max length is much lower than the PowerNV max length. 
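To make the constraints contract concrete, here is a rough sketch of how an in-kernel caller might validate its buffers against the reported limits before handing them to nx842_compress(). This caller is hypothetical and not part of the patch; the function name, buffer names and error handling are assumptions. It relies only on the nx842_constraints()/struct nx842_constraints interface added by the diff below, plus the usual linux/kernel.h helpers IS_ALIGNED(), round_down() and min_t().

static int check_nx842_buffers(const void *in, unsigned int in_len,
                               void *out, unsigned int *out_len)
{
        struct nx842_constraints c;
        int ret;

        /* ask the currently registered platform driver for its limits */
        ret = nx842_constraints(&c);
        if (ret)
                return ret;     /* -ENODEV: no platform driver loaded */

        /* all buffers must honour the driver's alignment */
        if (!IS_ALIGNED((unsigned long)in, c.alignment) ||
            !IS_ALIGNED((unsigned long)out, c.alignment))
                return -EINVAL;

        /* input length must sit in the size window and be a multiple */
        if (in_len < c.minimum || in_len > c.maximum || in_len % c.multiple)
                return -EINVAL;

        /* the output length may simply be rounded down to the multiple */
        *out_len = round_down(min_t(unsigned int, *out_len, c.maximum),
                              c.multiple);
        if (*out_len < c.minimum)
                return -ENOSPC;

        return 0;
}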
Signed-off-by: Dan Streetman Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-842-pseries.c | 10 ++++++++++ drivers/crypto/nx/nx-842.c | 38 ++++++++++++++++++++++++++++++++++++++ drivers/crypto/nx/nx-842.h | 2 ++ include/linux/nx842.h | 9 +++++++++ 4 files changed, 59 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c index 9b83c9e7fd73..cb481d81df06 100644 --- a/drivers/crypto/nx/nx-842-pseries.c +++ b/drivers/crypto/nx/nx-842-pseries.c @@ -40,6 +40,13 @@ MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors"); /* IO buffer must be 128 byte aligned */ #define IO_BUFFER_ALIGN 128 +static struct nx842_constraints nx842_pseries_constraints = { + .alignment = IO_BUFFER_ALIGN, + .multiple = DDE_BUFFER_LAST_MULT, + .minimum = IO_BUFFER_ALIGN, + .maximum = PAGE_SIZE, /* dynamic, max_sync_size */ +}; + struct nx842_header { int blocks_nr; /* number of compressed blocks */ int offset; /* offset of the first block (from beginning of header) */ @@ -842,6 +849,8 @@ static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata, goto out; } + nx842_pseries_constraints.maximum = devdata->max_sync_size; + devdata->max_sync_sg = (unsigned int)min(maxsynccop->comp_sg_limit, maxsynccop->decomp_sg_limit); if (devdata->max_sync_sg < 1) { @@ -1115,6 +1124,7 @@ static struct attribute_group nx842_attribute_group = { static struct nx842_driver nx842_pseries_driver = { .owner = THIS_MODULE, + .constraints = &nx842_pseries_constraints, .compress = nx842_pseries_compress, .decompress = nx842_pseries_decompress, }; diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c index f1f378eef373..160fe2d97336 100644 --- a/drivers/crypto/nx/nx-842.c +++ b/drivers/crypto/nx/nx-842.c @@ -86,6 +86,44 @@ static void put_driver(struct nx842_driver *driver) module_put(driver->owner); } +/** + * nx842_constraints + * + * This provides the driver's constraints. Different nx842 implementations + * may have varying requirements. The constraints are: + * @alignment: All buffers should be aligned to this + * @multiple: All buffer lengths should be a multiple of this + * @minimum: Buffer lengths must not be less than this amount + * @maximum: Buffer lengths must not be more than this amount + * + * The constraints apply to all buffers and lengths, both input and output, + * for both compression and decompression, except for the minimum which + * only applies to compression input and decompression output; the + * compressed data can be less than the minimum constraint. It can be + * assumed that compressed data will always adhere to the multiple + * constraint. + * + * The driver may succeed even if these constraints are violated; + * however the driver can return failure or suffer reduced performance + * if any constraint is not met. 
+ */ +int nx842_constraints(struct nx842_constraints *c) +{ + struct nx842_driver *driver = get_driver(); + int ret = 0; + + if (!driver) + return -ENODEV; + + BUG_ON(!c); + memcpy(c, driver->constraints, sizeof(*c)); + + put_driver(driver); + + return ret; +} +EXPORT_SYMBOL_GPL(nx842_constraints); + int nx842_compress(const unsigned char *in, unsigned int in_len, unsigned char *out, unsigned int *out_len, void *wrkmem) diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h index 2a5d4e197c72..c6ceb0f1d04c 100644 --- a/drivers/crypto/nx/nx-842.h +++ b/drivers/crypto/nx/nx-842.h @@ -12,6 +12,8 @@ struct nx842_driver { struct module *owner; + struct nx842_constraints *constraints; + int (*compress)(const unsigned char *in, unsigned int in_len, unsigned char *out, unsigned int *out_len, void *wrkmem); diff --git a/include/linux/nx842.h b/include/linux/nx842.h index d919c22b7fd6..aa1a97e90dea 100644 --- a/include/linux/nx842.h +++ b/include/linux/nx842.h @@ -5,6 +5,15 @@ #define NX842_MEM_COMPRESS __NX842_PSERIES_MEM_COMPRESS +struct nx842_constraints { + int alignment; + int multiple; + int minimum; + int maximum; +}; + +int nx842_constraints(struct nx842_constraints *constraints); + int nx842_compress(const unsigned char *in, unsigned int in_len, unsigned char *out, unsigned int *out_len, void *wrkmem); int nx842_decompress(const unsigned char *in, unsigned int in_len, -- cgit v1.2.3 From 99182a42b7ef3d5e4180992ce01befd9e87526d2 Mon Sep 17 00:00:00 2001 From: Dan Streetman Date: Thu, 7 May 2015 13:49:19 -0400 Subject: crypto: nx - add PowerNV platform NX-842 driver Add driver for NX-842 hardware on the PowerNV platform. This allows the use of the 842 compression hardware coprocessor on the PowerNV platform. Signed-off-by: Dan Streetman Signed-off-by: Herbert Xu --- drivers/crypto/nx/Kconfig | 10 + drivers/crypto/nx/Makefile | 2 + drivers/crypto/nx/nx-842-powernv.c | 625 +++++++++++++++++++++++++++++++++++++ drivers/crypto/nx/nx-842-pseries.c | 9 - drivers/crypto/nx/nx-842.c | 4 +- drivers/crypto/nx/nx-842.h | 97 ++++++ include/linux/nx842.h | 6 +- 7 files changed, 741 insertions(+), 12 deletions(-) create mode 100644 drivers/crypto/nx/nx-842-powernv.c (limited to 'drivers') diff --git a/drivers/crypto/nx/Kconfig b/drivers/crypto/nx/Kconfig index 34013f7d46c8..ee9e25956241 100644 --- a/drivers/crypto/nx/Kconfig +++ b/drivers/crypto/nx/Kconfig @@ -40,4 +40,14 @@ config CRYPTO_DEV_NX_COMPRESS_PSERIES algorithm. This supports NX hardware on the pSeries platform. If you choose 'M' here, this module will be called nx_compress_pseries. +config CRYPTO_DEV_NX_COMPRESS_POWERNV + tristate "Compression acceleration support on PowerNV platform" + depends on PPC_POWERNV + default y + help + Support for PowerPC Nest (NX) compression acceleration. This + module supports acceleration for compressing memory with the 842 + algorithm. This supports NX hardware on the PowerNV platform. + If you choose 'M' here, this module will be called nx_compress_powernv. 
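The PowerNV module enabled by this option plugs into the nx-842 frontend through the same struct nx842_driver interface the pSeries module uses. As a minimal illustration of what any platform backend has to register (a skeleton only; the names, the constraint values and the trivial stubs are made up for illustration and are not part of this patch; linux/module.h and the nx-842.h header are assumed):

/* hypothetical platform backend skeleton; a real backend drives hardware */
static struct nx842_constraints example_constraints = {
        .alignment = 128,
        .multiple  = 8,
        .minimum   = 128,
        .maximum   = PAGE_SIZE,
};

static int example_compress(const unsigned char *in, unsigned int in_len,
                            unsigned char *out, unsigned int *out_len,
                            void *wrkmem)
{
        return -ENODEV;         /* stub: a real backend submits to the coprocessor */
}

static int example_decompress(const unsigned char *in, unsigned int in_len,
                              unsigned char *out, unsigned int *out_len,
                              void *wrkmem)
{
        return -ENODEV;         /* stub */
}

static struct nx842_driver example_driver = {
        .owner       = THIS_MODULE,
        .constraints = &example_constraints,
        .compress    = example_compress,
        .decompress  = example_decompress,
};

static int __init example_init(void)
{
        nx842_register_driver(&example_driver);
        return 0;
}
module_init(example_init);

static void __exit example_exit(void)
{
        nx842_unregister_driver(&example_driver);
}
module_exit(example_exit);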
+ endif diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile index 5d9f4bc15209..6619787423b3 100644 --- a/drivers/crypto/nx/Makefile +++ b/drivers/crypto/nx/Makefile @@ -12,5 +12,7 @@ nx-crypto-objs := nx.o \ obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS) += nx-compress.o obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o +obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o nx-compress-objs := nx-842.o nx-compress-pseries-objs := nx-842-pseries.o +nx-compress-powernv-objs := nx-842-powernv.o diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c new file mode 100644 index 000000000000..6a9fb8b2d05b --- /dev/null +++ b/drivers/crypto/nx/nx-842-powernv.c @@ -0,0 +1,625 @@ +/* + * Driver for IBM PowerNV 842 compression accelerator + * + * Copyright (C) 2015 Dan Streetman, IBM Corp + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "nx-842.h" + +#include + +#include +#include + +#define MODULE_NAME NX842_POWERNV_MODULE_NAME +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Dan Streetman "); +MODULE_DESCRIPTION("842 H/W Compression driver for IBM PowerNV processors"); + +#define WORKMEM_ALIGN (CRB_ALIGN) +#define CSB_WAIT_MAX (5000) /* ms */ + +struct nx842_workmem { + /* Below fields must be properly aligned */ + struct coprocessor_request_block crb; /* CRB_ALIGN align */ + struct data_descriptor_entry ddl_in[DDL_LEN_MAX]; /* DDE_ALIGN align */ + struct data_descriptor_entry ddl_out[DDL_LEN_MAX]; /* DDE_ALIGN align */ + /* Above fields must be properly aligned */ + + ktime_t start; + + char padding[WORKMEM_ALIGN]; /* unused, to allow alignment */ +} __packed __aligned(WORKMEM_ALIGN); + +struct nx842_coproc { + unsigned int chip_id; + unsigned int ct; + unsigned int ci; + struct list_head list; +}; + +/* no cpu hotplug on powernv, so this list never changes after init */ +static LIST_HEAD(nx842_coprocs); +static unsigned int nx842_ct; + +/** + * setup_indirect_dde - Setup an indirect DDE + * + * The DDE is setup with the the DDE count, byte count, and address of + * first direct DDE in the list. + */ +static void setup_indirect_dde(struct data_descriptor_entry *dde, + struct data_descriptor_entry *ddl, + unsigned int dde_count, unsigned int byte_count) +{ + dde->flags = 0; + dde->count = dde_count; + dde->index = 0; + dde->length = cpu_to_be32(byte_count); + dde->address = cpu_to_be64(nx842_get_pa(ddl)); +} + +/** + * setup_direct_dde - Setup single DDE from buffer + * + * The DDE is setup with the buffer and length. The buffer must be properly + * aligned. The used length is returned. 
+ * Returns: + * N Successfully set up DDE with N bytes + */ +static unsigned int setup_direct_dde(struct data_descriptor_entry *dde, + unsigned long pa, unsigned int len) +{ + unsigned int l = min_t(unsigned int, len, LEN_ON_PAGE(pa)); + + dde->flags = 0; + dde->count = 0; + dde->index = 0; + dde->length = cpu_to_be32(l); + dde->address = cpu_to_be64(pa); + + return l; +} + +/** + * setup_ddl - Setup DDL from buffer + * + * Returns: + * 0 Successfully set up DDL + */ +static int setup_ddl(struct data_descriptor_entry *dde, + struct data_descriptor_entry *ddl, + unsigned char *buf, unsigned int len, + bool in) +{ + unsigned long pa = nx842_get_pa(buf); + int i, ret, total_len = len; + + if (!IS_ALIGNED(pa, DDE_BUFFER_ALIGN)) { + pr_debug("%s buffer pa 0x%lx not 0x%x-byte aligned\n", + in ? "input" : "output", pa, DDE_BUFFER_ALIGN); + return -EINVAL; + } + + /* only need to check last mult; since buffer must be + * DDE_BUFFER_ALIGN aligned, and that is a multiple of + * DDE_BUFFER_SIZE_MULT, and pre-last page DDE buffers + * are guaranteed a multiple of DDE_BUFFER_SIZE_MULT. + */ + if (len % DDE_BUFFER_LAST_MULT) { + pr_debug("%s buffer len 0x%x not a multiple of 0x%x\n", + in ? "input" : "output", len, DDE_BUFFER_LAST_MULT); + if (in) + return -EINVAL; + len = round_down(len, DDE_BUFFER_LAST_MULT); + } + + /* use a single direct DDE */ + if (len <= LEN_ON_PAGE(pa)) { + ret = setup_direct_dde(dde, pa, len); + WARN_ON(ret < len); + return 0; + } + + /* use the DDL */ + for (i = 0; i < DDL_LEN_MAX && len > 0; i++) { + ret = setup_direct_dde(&ddl[i], pa, len); + buf += ret; + len -= ret; + pa = nx842_get_pa(buf); + } + + if (len > 0) { + pr_debug("0x%x total %s bytes 0x%x too many for DDL.\n", + total_len, in ? "input" : "output", len); + if (in) + return -EMSGSIZE; + total_len -= len; + } + setup_indirect_dde(dde, ddl, i, total_len); + + return 0; +} + +#define CSB_ERR(csb, msg, ...) \ + pr_err("ERROR: " msg " : %02x %02x %02x %02x %08x\n", \ + ##__VA_ARGS__, (csb)->flags, \ + (csb)->cs, (csb)->cc, (csb)->ce, \ + be32_to_cpu((csb)->count)) + +#define CSB_ERR_ADDR(csb, msg, ...) 
\ + CSB_ERR(csb, msg " at %lx", ##__VA_ARGS__, \ + (unsigned long)be64_to_cpu((csb)->address)) + +/** + * wait_for_csb + */ +static int wait_for_csb(struct nx842_workmem *wmem, + struct coprocessor_status_block *csb) +{ + ktime_t start = wmem->start, now = ktime_get(); + ktime_t timeout = ktime_add_ms(start, CSB_WAIT_MAX); + + while (!(ACCESS_ONCE(csb->flags) & CSB_V)) { + cpu_relax(); + now = ktime_get(); + if (ktime_after(now, timeout)) + break; + } + + /* hw has updated csb and output buffer */ + barrier(); + + /* check CSB flags */ + if (!(csb->flags & CSB_V)) { + CSB_ERR(csb, "CSB still not valid after %ld us, giving up", + (long)ktime_us_delta(now, start)); + return -ETIMEDOUT; + } + if (csb->flags & CSB_F) { + CSB_ERR(csb, "Invalid CSB format"); + return -EPROTO; + } + if (csb->flags & CSB_CH) { + CSB_ERR(csb, "Invalid CSB chaining state"); + return -EPROTO; + } + + /* verify CSB completion sequence is 0 */ + if (csb->cs) { + CSB_ERR(csb, "Invalid CSB completion sequence"); + return -EPROTO; + } + + /* check CSB Completion Code */ + switch (csb->cc) { + /* no error */ + case CSB_CC_SUCCESS: + break; + case CSB_CC_TPBC_GT_SPBC: + /* not an error, but the compressed data is + * larger than the uncompressed data :( + */ + break; + + /* input data errors */ + case CSB_CC_OPERAND_OVERLAP: + /* input and output buffers overlap */ + CSB_ERR(csb, "Operand Overlap error"); + return -EINVAL; + case CSB_CC_INVALID_OPERAND: + CSB_ERR(csb, "Invalid operand"); + return -EINVAL; + case CSB_CC_NOSPC: + /* output buffer too small */ + return -ENOSPC; + case CSB_CC_ABORT: + CSB_ERR(csb, "Function aborted"); + return -EINTR; + case CSB_CC_CRC_MISMATCH: + CSB_ERR(csb, "CRC mismatch"); + return -EINVAL; + case CSB_CC_TEMPL_INVALID: + CSB_ERR(csb, "Compressed data template invalid"); + return -EINVAL; + case CSB_CC_TEMPL_OVERFLOW: + CSB_ERR(csb, "Compressed data template shows data past end"); + return -EINVAL; + + /* these should not happen */ + case CSB_CC_INVALID_ALIGN: + /* setup_ddl should have detected this */ + CSB_ERR_ADDR(csb, "Invalid alignment"); + return -EINVAL; + case CSB_CC_DATA_LENGTH: + /* setup_ddl should have detected this */ + CSB_ERR(csb, "Invalid data length"); + return -EINVAL; + case CSB_CC_WR_TRANSLATION: + case CSB_CC_TRANSLATION: + case CSB_CC_TRANSLATION_DUP1: + case CSB_CC_TRANSLATION_DUP2: + case CSB_CC_TRANSLATION_DUP3: + case CSB_CC_TRANSLATION_DUP4: + case CSB_CC_TRANSLATION_DUP5: + case CSB_CC_TRANSLATION_DUP6: + /* should not happen, we use physical addrs */ + CSB_ERR_ADDR(csb, "Translation error"); + return -EPROTO; + case CSB_CC_WR_PROTECTION: + case CSB_CC_PROTECTION: + case CSB_CC_PROTECTION_DUP1: + case CSB_CC_PROTECTION_DUP2: + case CSB_CC_PROTECTION_DUP3: + case CSB_CC_PROTECTION_DUP4: + case CSB_CC_PROTECTION_DUP5: + case CSB_CC_PROTECTION_DUP6: + /* should not happen, we use physical addrs */ + CSB_ERR_ADDR(csb, "Protection error"); + return -EPROTO; + case CSB_CC_PRIVILEGE: + /* shouldn't happen, we're in HYP mode */ + CSB_ERR(csb, "Insufficient Privilege error"); + return -EPROTO; + case CSB_CC_EXCESSIVE_DDE: + /* shouldn't happen, setup_ddl doesn't use many dde's */ + CSB_ERR(csb, "Too many DDEs in DDL"); + return -EINVAL; + case CSB_CC_TRANSPORT: + /* shouldn't happen, we setup CRB correctly */ + CSB_ERR(csb, "Invalid CRB"); + return -EINVAL; + case CSB_CC_SEGMENTED_DDL: + /* shouldn't happen, setup_ddl creates DDL right */ + CSB_ERR(csb, "Segmented DDL error"); + return -EINVAL; + case CSB_CC_DDE_OVERFLOW: + /* shouldn't happen, setup_ddl creates DDL right 
*/ + CSB_ERR(csb, "DDE overflow error"); + return -EINVAL; + case CSB_CC_SESSION: + /* should not happen with ICSWX */ + CSB_ERR(csb, "Session violation error"); + return -EPROTO; + case CSB_CC_CHAIN: + /* should not happen, we don't use chained CRBs */ + CSB_ERR(csb, "Chained CRB error"); + return -EPROTO; + case CSB_CC_SEQUENCE: + /* should not happen, we don't use chained CRBs */ + CSB_ERR(csb, "CRB seqeunce number error"); + return -EPROTO; + case CSB_CC_UNKNOWN_CODE: + CSB_ERR(csb, "Unknown subfunction code"); + return -EPROTO; + + /* hardware errors */ + case CSB_CC_RD_EXTERNAL: + case CSB_CC_RD_EXTERNAL_DUP1: + case CSB_CC_RD_EXTERNAL_DUP2: + case CSB_CC_RD_EXTERNAL_DUP3: + CSB_ERR_ADDR(csb, "Read error outside coprocessor"); + return -EPROTO; + case CSB_CC_WR_EXTERNAL: + CSB_ERR_ADDR(csb, "Write error outside coprocessor"); + return -EPROTO; + case CSB_CC_INTERNAL: + CSB_ERR(csb, "Internal error in coprocessor"); + return -EPROTO; + case CSB_CC_PROVISION: + CSB_ERR(csb, "Storage provision error"); + return -EPROTO; + case CSB_CC_HW: + CSB_ERR(csb, "Correctable hardware error"); + return -EPROTO; + + default: + CSB_ERR(csb, "Invalid CC %d", csb->cc); + return -EPROTO; + } + + /* check Completion Extension state */ + if (csb->ce & CSB_CE_TERMINATION) { + CSB_ERR(csb, "CSB request was terminated"); + return -EPROTO; + } + if (csb->ce & CSB_CE_INCOMPLETE) { + CSB_ERR(csb, "CSB request not complete"); + return -EPROTO; + } + if (!(csb->ce & CSB_CE_TPBC)) { + CSB_ERR(csb, "TPBC not provided, unknown target length"); + return -EPROTO; + } + + /* successful completion */ + pr_debug_ratelimited("Processed %u bytes in %lu us\n", csb->count, + (unsigned long)ktime_us_delta(now, start)); + + return 0; +} + +/** + * nx842_powernv_function - compress/decompress data using the 842 algorithm + * + * (De)compression provided by the NX842 coprocessor on IBM PowerNV systems. + * This compresses or decompresses the provided input buffer into the provided + * output buffer. + * + * Upon return from this function @outlen contains the length of the + * output data. If there is an error then @outlen will be 0 and an + * error will be specified by the return code from this function. + * + * The @workmem buffer should only be used by one function call at a time. 
+ * + * @in: input buffer pointer + * @inlen: input buffer size + * @out: output buffer pointer + * @outlenp: output buffer size pointer + * @workmem: working memory buffer pointer, must be at least NX842_MEM_COMPRESS + * @fc: function code, see CCW Function Codes in nx-842.h + * + * Returns: + * 0 Success, output of length @outlenp stored in the buffer at @out + * -ENODEV Hardware unavailable + * -ENOSPC Output buffer is to small + * -EMSGSIZE Input buffer too large + * -EINVAL buffer constraints do not fix nx842_constraints + * -EPROTO hardware error during operation + * -ETIMEDOUT hardware did not complete operation in reasonable time + * -EINTR operation was aborted + */ +static int nx842_powernv_function(const unsigned char *in, unsigned int inlen, + unsigned char *out, unsigned int *outlenp, + void *workmem, int fc) +{ + struct coprocessor_request_block *crb; + struct coprocessor_status_block *csb; + struct nx842_workmem *wmem; + int ret; + u64 csb_addr; + u32 ccw; + unsigned int outlen = *outlenp; + + wmem = PTR_ALIGN(workmem, WORKMEM_ALIGN); + + *outlenp = 0; + + /* shoudn't happen, we don't load without a coproc */ + if (!nx842_ct) { + pr_err_ratelimited("coprocessor CT is 0"); + return -ENODEV; + } + + crb = &wmem->crb; + csb = &crb->csb; + + /* Clear any previous values */ + memset(crb, 0, sizeof(*crb)); + + /* set up DDLs */ + ret = setup_ddl(&crb->source, wmem->ddl_in, + (unsigned char *)in, inlen, true); + if (ret) + return ret; + ret = setup_ddl(&crb->target, wmem->ddl_out, + out, outlen, false); + if (ret) + return ret; + + /* set up CCW */ + ccw = 0; + ccw = SET_FIELD(ccw, CCW_CT, nx842_ct); + ccw = SET_FIELD(ccw, CCW_CI_842, 0); /* use 0 for hw auto-selection */ + ccw = SET_FIELD(ccw, CCW_FC_842, fc); + + /* set up CRB's CSB addr */ + csb_addr = nx842_get_pa(csb) & CRB_CSB_ADDRESS; + csb_addr |= CRB_CSB_AT; /* Addrs are phys */ + crb->csb_addr = cpu_to_be64(csb_addr); + + wmem->start = ktime_get(); + + /* do ICSWX */ + ret = icswx(cpu_to_be32(ccw), crb); + + pr_debug_ratelimited("icswx CR %x ccw %x crb->ccw %x\n", ret, + (unsigned int)ccw, + (unsigned int)be32_to_cpu(crb->ccw)); + + switch (ret) { + case ICSWX_INITIATED: + ret = wait_for_csb(wmem, csb); + break; + case ICSWX_BUSY: + pr_debug_ratelimited("842 Coprocessor busy\n"); + ret = -EBUSY; + break; + case ICSWX_REJECTED: + pr_err_ratelimited("ICSWX rejected\n"); + ret = -EPROTO; + break; + default: + pr_err_ratelimited("Invalid ICSWX return code %x\n", ret); + ret = -EPROTO; + break; + } + + if (!ret) + *outlenp = be32_to_cpu(csb->count); + + return ret; +} + +/** + * nx842_powernv_compress - Compress data using the 842 algorithm + * + * Compression provided by the NX842 coprocessor on IBM PowerNV systems. + * The input buffer is compressed and the result is stored in the + * provided output buffer. + * + * Upon return from this function @outlen contains the length of the + * compressed data. If there is an error then @outlen will be 0 and an + * error will be specified by the return code from this function. 
+ * + * @in: input buffer pointer + * @inlen: input buffer size + * @out: output buffer pointer + * @outlenp: output buffer size pointer + * @workmem: working memory buffer pointer, must be at least NX842_MEM_COMPRESS + * + * Returns: see @nx842_powernv_function() + */ +static int nx842_powernv_compress(const unsigned char *in, unsigned int inlen, + unsigned char *out, unsigned int *outlenp, + void *wmem) +{ + return nx842_powernv_function(in, inlen, out, outlenp, + wmem, CCW_FC_842_COMP_NOCRC); +} + +/** + * nx842_powernv_decompress - Decompress data using the 842 algorithm + * + * Decompression provided by the NX842 coprocessor on IBM PowerNV systems. + * The input buffer is decompressed and the result is stored in the + * provided output buffer. + * + * Upon return from this function @outlen contains the length of the + * decompressed data. If there is an error then @outlen will be 0 and an + * error will be specified by the return code from this function. + * + * @in: input buffer pointer + * @inlen: input buffer size + * @out: output buffer pointer + * @outlenp: output buffer size pointer + * @workmem: working memory buffer pointer, must be at least NX842_MEM_COMPRESS + * + * Returns: see @nx842_powernv_function() + */ +static int nx842_powernv_decompress(const unsigned char *in, unsigned int inlen, + unsigned char *out, unsigned int *outlenp, + void *wmem) +{ + return nx842_powernv_function(in, inlen, out, outlenp, + wmem, CCW_FC_842_DECOMP_NOCRC); +} + +static int __init nx842_powernv_probe(struct device_node *dn) +{ + struct nx842_coproc *coproc; + struct property *ct_prop, *ci_prop; + unsigned int ct, ci; + int chip_id; + + chip_id = of_get_ibm_chip_id(dn); + if (chip_id < 0) { + pr_err("ibm,chip-id missing\n"); + return -EINVAL; + } + ct_prop = of_find_property(dn, "ibm,842-coprocessor-type", NULL); + if (!ct_prop) { + pr_err("ibm,842-coprocessor-type missing\n"); + return -EINVAL; + } + ct = be32_to_cpu(*(unsigned int *)ct_prop->value); + ci_prop = of_find_property(dn, "ibm,842-coprocessor-instance", NULL); + if (!ci_prop) { + pr_err("ibm,842-coprocessor-instance missing\n"); + return -EINVAL; + } + ci = be32_to_cpu(*(unsigned int *)ci_prop->value); + + coproc = kmalloc(sizeof(*coproc), GFP_KERNEL); + if (!coproc) + return -ENOMEM; + + coproc->chip_id = chip_id; + coproc->ct = ct; + coproc->ci = ci; + INIT_LIST_HEAD(&coproc->list); + list_add(&coproc->list, &nx842_coprocs); + + pr_info("coprocessor found on chip %d, CT %d CI %d\n", chip_id, ct, ci); + + if (!nx842_ct) + nx842_ct = ct; + else if (nx842_ct != ct) + pr_err("NX842 chip %d, CT %d != first found CT %d\n", + chip_id, ct, nx842_ct); + + return 0; +} + +static struct nx842_constraints nx842_powernv_constraints = { + .alignment = DDE_BUFFER_ALIGN, + .multiple = DDE_BUFFER_LAST_MULT, + .minimum = DDE_BUFFER_LAST_MULT, + .maximum = (DDL_LEN_MAX - 1) * PAGE_SIZE, +}; + +static struct nx842_driver nx842_powernv_driver = { + .owner = THIS_MODULE, + .constraints = &nx842_powernv_constraints, + .compress = nx842_powernv_compress, + .decompress = nx842_powernv_decompress, +}; + +static __init int nx842_powernv_init(void) +{ + struct device_node *dn; + + /* verify workmem size/align restrictions */ + BUILD_BUG_ON(sizeof(struct nx842_workmem) > NX842_MEM_COMPRESS); + BUILD_BUG_ON(WORKMEM_ALIGN % CRB_ALIGN); + BUILD_BUG_ON(CRB_ALIGN % DDE_ALIGN); + BUILD_BUG_ON(CRB_SIZE % DDE_ALIGN); + /* verify buffer size/align restrictions */ + BUILD_BUG_ON(PAGE_SIZE % DDE_BUFFER_ALIGN); + BUILD_BUG_ON(DDE_BUFFER_ALIGN % DDE_BUFFER_SIZE_MULT); + 
BUILD_BUG_ON(DDE_BUFFER_SIZE_MULT % DDE_BUFFER_LAST_MULT); + + pr_info("loading\n"); + + for_each_compatible_node(dn, NULL, NX842_POWERNV_COMPAT_NAME) + nx842_powernv_probe(dn); + + if (!nx842_ct) { + pr_err("no coprocessors found\n"); + return -ENODEV; + } + + nx842_register_driver(&nx842_powernv_driver); + + pr_info("loaded\n"); + + return 0; +} +module_init(nx842_powernv_init); + +static void __exit nx842_powernv_exit(void) +{ + struct nx842_coproc *coproc, *n; + + nx842_unregister_driver(&nx842_powernv_driver); + + list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) { + list_del(&coproc->list); + kfree(coproc); + } + + pr_info("unloaded\n"); +} +module_exit(nx842_powernv_exit); diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c index cb481d81df06..6db99924652c 100644 --- a/drivers/crypto/nx/nx-842-pseries.c +++ b/drivers/crypto/nx/nx-842-pseries.c @@ -160,15 +160,6 @@ static inline unsigned long nx842_get_scatterlist_size( return sl->entry_nr * sizeof(struct nx842_slentry); } -static inline unsigned long nx842_get_pa(void *addr) -{ - if (is_vmalloc_addr(addr)) - return page_to_phys(vmalloc_to_page(addr)) - + offset_in_page(addr); - else - return __pa(addr); -} - static int nx842_build_scatterlist(unsigned long buf, int len, struct nx842_scatterlist *sl) { diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c index 160fe2d97336..bf2823ceaf4e 100644 --- a/drivers/crypto/nx/nx-842.c +++ b/drivers/crypto/nx/nx-842.c @@ -164,7 +164,9 @@ static __init int nx842_init(void) { pr_info("loading\n"); - if (of_find_compatible_node(NULL, NULL, NX842_PSERIES_COMPAT_NAME)) + if (of_find_compatible_node(NULL, NULL, NX842_POWERNV_COMPAT_NAME)) + request_module_nowait(NX842_POWERNV_MODULE_NAME); + else if (of_find_compatible_node(NULL, NULL, NX842_PSERIES_COMPAT_NAME)) request_module_nowait(NX842_PSERIES_MODULE_NAME); else pr_err("no nx842 driver found.\n"); diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h index c6ceb0f1d04c..84b15b7448bb 100644 --- a/drivers/crypto/nx/nx-842.h +++ b/drivers/crypto/nx/nx-842.h @@ -5,9 +5,104 @@ #include #include #include +#include #include #include #include +#include +#include + +/* Restrictions on Data Descriptor List (DDL) and Entry (DDE) buffers + * + * From NX P8 workbook, sec 4.9.1 "842 details" + * Each DDE buffer is 128 byte aligned + * Each DDE buffer size is a multiple of 32 bytes (except the last) + * The last DDE buffer size is a multiple of 8 bytes + */ +#define DDE_BUFFER_ALIGN (128) +#define DDE_BUFFER_SIZE_MULT (32) +#define DDE_BUFFER_LAST_MULT (8) + +/* Arbitrary DDL length limit + * Allows max buffer size of MAX-1 to MAX pages + * (depending on alignment) + */ +#define DDL_LEN_MAX (17) + +/* CCW 842 CI/FC masks + * NX P8 workbook, section 4.3.1, figure 4-6 + * "CI/FC Boundary by NX CT type" + */ +#define CCW_CI_842 (0x00003ff8) +#define CCW_FC_842 (0x00000007) + +/* CCW Function Codes (FC) for 842 + * NX P8 workbook, section 4.9, table 4-28 + * "Function Code Definitions for 842 Memory Compression" + */ +#define CCW_FC_842_COMP_NOCRC (0) +#define CCW_FC_842_COMP_CRC (1) +#define CCW_FC_842_DECOMP_NOCRC (2) +#define CCW_FC_842_DECOMP_CRC (3) +#define CCW_FC_842_MOVE (4) + +/* CSB CC Error Types for 842 + * NX P8 workbook, section 4.10.3, table 4-30 + * "Reported Error Types Summary Table" + */ +/* These are all duplicates of existing codes defined in icswx.h. 
*/ +#define CSB_CC_TRANSLATION_DUP1 (80) +#define CSB_CC_TRANSLATION_DUP2 (82) +#define CSB_CC_TRANSLATION_DUP3 (84) +#define CSB_CC_TRANSLATION_DUP4 (86) +#define CSB_CC_TRANSLATION_DUP5 (92) +#define CSB_CC_TRANSLATION_DUP6 (94) +#define CSB_CC_PROTECTION_DUP1 (81) +#define CSB_CC_PROTECTION_DUP2 (83) +#define CSB_CC_PROTECTION_DUP3 (85) +#define CSB_CC_PROTECTION_DUP4 (87) +#define CSB_CC_PROTECTION_DUP5 (93) +#define CSB_CC_PROTECTION_DUP6 (95) +#define CSB_CC_RD_EXTERNAL_DUP1 (89) +#define CSB_CC_RD_EXTERNAL_DUP2 (90) +#define CSB_CC_RD_EXTERNAL_DUP3 (91) +/* These are specific to NX */ +/* 842 codes */ +#define CSB_CC_TPBC_GT_SPBC (64) /* no error, but >1 comp ratio */ +#define CSB_CC_CRC_MISMATCH (65) /* decomp crc mismatch */ +#define CSB_CC_TEMPL_INVALID (66) /* decomp invalid template value */ +#define CSB_CC_TEMPL_OVERFLOW (67) /* decomp template shows data after end */ +/* sym crypt codes */ +#define CSB_CC_DECRYPT_OVERFLOW (64) +/* asym crypt codes */ +#define CSB_CC_MINV_OVERFLOW (128) +/* These are reserved for hypervisor use */ +#define CSB_CC_HYP_RESERVE_START (240) +#define CSB_CC_HYP_RESERVE_END (253) +#define CSB_CC_HYP_NO_HW (254) +#define CSB_CC_HYP_HANG_ABORTED (255) + +/* CCB Completion Modes (CM) for 842 + * NX P8 workbook, section 4.3, figure 4-5 + * "CRB Details - Normal Cop_Req (CL=00, C=1)" + */ +#define CCB_CM_EXTRA_WRITE (CCB_CM0_ALL_COMPLETIONS & CCB_CM12_STORE) +#define CCB_CM_INTERRUPT (CCB_CM0_ALL_COMPLETIONS & CCB_CM12_INTERRUPT) + +#define LEN_ON_PAGE(pa) (PAGE_SIZE - ((pa) & ~PAGE_MASK)) + +static inline unsigned long nx842_get_pa(void *addr) +{ + if (!is_vmalloc_addr(addr)) + return __pa(addr); + + return page_to_phys(vmalloc_to_page(addr)) + offset_in_page(addr); +} + +/* Get/Set bit fields */ +#define MASK_LSH(m) (__builtin_ffsl(m) - 1) +#define GET_FIELD(v, m) (((v) & (m)) >> MASK_LSH(m)) +#define SET_FIELD(v, m, val) (((v) & ~(m)) | (((val) << MASK_LSH(m)) & (m))) struct nx842_driver { struct module *owner; @@ -27,6 +122,8 @@ void nx842_unregister_driver(struct nx842_driver *driver); /* To allow the main nx-compress module to load platform module */ +#define NX842_POWERNV_MODULE_NAME "nx-compress-powernv" +#define NX842_POWERNV_COMPAT_NAME "ibm,power-nx" #define NX842_PSERIES_MODULE_NAME "nx-compress-pseries" #define NX842_PSERIES_COMPAT_NAME "ibm,compression" diff --git a/include/linux/nx842.h b/include/linux/nx842.h index aa1a97e90dea..4ddf68d9c0d4 100644 --- a/include/linux/nx842.h +++ b/include/linux/nx842.h @@ -1,9 +1,11 @@ #ifndef __NX842_H__ #define __NX842_H__ -#define __NX842_PSERIES_MEM_COMPRESS ((PAGE_SIZE * 2) + 10240) +#define __NX842_PSERIES_MEM_COMPRESS (10240) +#define __NX842_POWERNV_MEM_COMPRESS (1024) -#define NX842_MEM_COMPRESS __NX842_PSERIES_MEM_COMPRESS +#define NX842_MEM_COMPRESS (max_t(unsigned int, \ + __NX842_PSERIES_MEM_COMPRESS, __NX842_POWERNV_MEM_COMPRESS)) struct nx842_constraints { int alignment; -- cgit v1.2.3 From b8e04187c90107c58d1ccbeb68a0ba4c5bfd4167 Mon Sep 17 00:00:00 2001 From: Dan Streetman Date: Thu, 7 May 2015 13:49:20 -0400 Subject: crypto: nx - simplify pSeries nx842 driver Simplify the pSeries NX-842 driver: do not expect incoming buffers to be exactly page-sized; do not break up input buffers to compress smaller blocks; do not use any internal headers in the compressed data blocks; remove the software decompression implementation; implement the pSeries nx842_constraints. 
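Removing the in-driver software decompressor works because, as the next paragraph explains, the fallback moves up a layer: the generic crypto 842 driver retries in software when the hardware path fails. A minimal sketch of that hardware-first pattern is below; it is illustrative only, with the software routine passed in as a caller-supplied function pointer rather than naming any particular implementation:

/* sketch: try the NX hardware first, fall back to software on failure */
static int decompress_842(const unsigned char *in, unsigned int in_len,
                          unsigned char *out, unsigned int *out_len,
                          void *wrkmem,
                          int (*sw_fallback)(const unsigned char *, unsigned int,
                                             unsigned char *, unsigned int *))
{
        unsigned int saved_out_len = *out_len;
        int ret;

        ret = nx842_decompress(in, in_len, out, out_len, wrkmem);
        if (!ret || !sw_fallback)
                return ret;

        /* hardware missing (-ENODEV) or failed: retry in software */
        *out_len = saved_out_len;
        return sw_fallback(in, in_len, out, out_len);
}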
This changes the pSeries NX-842 driver to perform constraints-based compression so that it only needs to compress one entire input block at a time. This removes the need for it to split input data blocks into multiple compressed data sections in the output buffer, and removes the need for any extra header info in the compressed data; all that is moved (in a later patch) into the main crypto 842 driver. Additionally, the 842 software decompression implementation is no longer needed here, as the crypto 842 driver will use the generic software 842 decompression function as a fallback if any hardware 842 driver fails. Signed-off-by: Dan Streetman Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-842-pseries.c | 779 ++++++++----------------------------- 1 file changed, 153 insertions(+), 626 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c index 6db99924652c..85837e96e9a3 100644 --- a/drivers/crypto/nx/nx-842-pseries.c +++ b/drivers/crypto/nx/nx-842-pseries.c @@ -21,7 +21,6 @@ * Seth Jennings */ -#include #include #include "nx-842.h" @@ -32,11 +31,6 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Robert Jennings "); MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors"); -#define SHIFT_4K 12 -#define SHIFT_64K 16 -#define SIZE_4K (1UL << SHIFT_4K) -#define SIZE_64K (1UL << SHIFT_64K) - /* IO buffer must be 128 byte aligned */ #define IO_BUFFER_ALIGN 128 @@ -47,18 +41,52 @@ static struct nx842_constraints nx842_pseries_constraints = { .maximum = PAGE_SIZE, /* dynamic, max_sync_size */ }; -struct nx842_header { - int blocks_nr; /* number of compressed blocks */ - int offset; /* offset of the first block (from beginning of header) */ - int sizes[0]; /* size of compressed blocks */ -}; - -static inline int nx842_header_size(const struct nx842_header *hdr) +static int check_constraints(unsigned long buf, unsigned int *len, bool in) { - return sizeof(struct nx842_header) + - hdr->blocks_nr * sizeof(hdr->sizes[0]); + if (!IS_ALIGNED(buf, nx842_pseries_constraints.alignment)) { + pr_debug("%s buffer 0x%lx not aligned to 0x%x\n", + in ? "input" : "output", buf, + nx842_pseries_constraints.alignment); + return -EINVAL; + } + if (*len % nx842_pseries_constraints.multiple) { + pr_debug("%s buffer len 0x%x not multiple of 0x%x\n", + in ? "input" : "output", *len, + nx842_pseries_constraints.multiple); + if (in) + return -EINVAL; + *len = round_down(*len, nx842_pseries_constraints.multiple); + } + if (*len < nx842_pseries_constraints.minimum) { + pr_debug("%s buffer len 0x%x under minimum 0x%x\n", + in ? "input" : "output", *len, + nx842_pseries_constraints.minimum); + return -EINVAL; + } + if (*len > nx842_pseries_constraints.maximum) { + pr_debug("%s buffer len 0x%x over maximum 0x%x\n", + in ? "input" : "output", *len, + nx842_pseries_constraints.maximum); + if (in) + return -EINVAL; + *len = nx842_pseries_constraints.maximum; + } + return 0; } +/* I assume we need to align the CSB? 
*/ +#define WORKMEM_ALIGN (256) + +struct nx842_workmem { + /* scatterlist */ + char slin[4096]; + char slout[4096]; + /* coprocessor status/parameter block */ + struct nx_csbcpb csbcpb; + + char padding[WORKMEM_ALIGN]; +} __aligned(WORKMEM_ALIGN); + /* Macros for fields within nx_csbcpb */ /* Check the valid bit within the csbcpb valid field */ #define NX842_CSBCBP_VALID_CHK(x) (x & BIT_MASK(7)) @@ -72,8 +100,7 @@ static inline int nx842_header_size(const struct nx842_header *hdr) #define NX842_CSBCPB_CE2(x) (x & BIT_MASK(5)) /* The NX unit accepts data only on 4K page boundaries */ -#define NX842_HW_PAGE_SHIFT SHIFT_4K -#define NX842_HW_PAGE_SIZE (ASM_CONST(1) << NX842_HW_PAGE_SHIFT) +#define NX842_HW_PAGE_SIZE (4096) #define NX842_HW_PAGE_MASK (~(NX842_HW_PAGE_SIZE-1)) enum nx842_status { @@ -194,41 +221,6 @@ static int nx842_build_scatterlist(unsigned long buf, int len, return 0; } -/* - * Working memory for software decompression - */ -struct sw842_fifo { - union { - char f8[256][8]; - char f4[512][4]; - }; - char f2[256][2]; - unsigned char f84_full; - unsigned char f2_full; - unsigned char f8_count; - unsigned char f2_count; - unsigned int f4_count; -}; - -/* - * Working memory for crypto API - */ -struct nx842_workmem { - char bounce[PAGE_SIZE]; /* bounce buffer for decompression input */ - union { - /* hardware working memory */ - struct { - /* scatterlist */ - char slin[SIZE_4K]; - char slout[SIZE_4K]; - /* coprocessor status/parameter block */ - struct nx_csbcpb csbcpb; - }; - /* software working memory */ - struct sw842_fifo swfifo; /* software decompression fifo */ - }; -}; - static int nx842_validate_result(struct device *dev, struct cop_status_block *csb) { @@ -291,8 +283,8 @@ static int nx842_validate_result(struct device *dev, * compressed data. If there is an error then @outlen will be 0 and an * error will be specified by the return code from this function. * - * @in: Pointer to input buffer, must be page aligned - * @inlen: Length of input buffer, must be PAGE_SIZE + * @in: Pointer to input buffer + * @inlen: Length of input buffer * @out: Pointer to output buffer * @outlen: Length of output buffer * @wrkmem: ptr to buffer for working memory, size determined by @@ -302,7 +294,6 @@ static int nx842_validate_result(struct device *dev, * 0 Success, output of length @outlen stored in the buffer at @out * -ENOMEM Unable to allocate internal buffers * -ENOSPC Output buffer is to small - * -EMSGSIZE XXX Difficult to describe this limitation * -EIO Internal error * -ENODEV Hardware unavailable */ @@ -310,29 +301,26 @@ static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen, unsigned char *out, unsigned int *outlen, void *wmem) { - struct nx842_header *hdr; struct nx842_devdata *local_devdata; struct device *dev = NULL; struct nx842_workmem *workmem; struct nx842_scatterlist slin, slout; struct nx_csbcpb *csbcpb; - int ret = 0, max_sync_size, i, bytesleft, size, hdrsize; - unsigned long inbuf, outbuf, padding; + int ret = 0, max_sync_size; + unsigned long inbuf, outbuf; struct vio_pfo_op op = { .done = NULL, .handle = 0, .timeout = 0, }; - unsigned long start_time = get_tb(); + unsigned long start = get_tb(); - /* - * Make sure input buffer is 64k page aligned. This is assumed since - * this driver is designed for page compression only (for now). This - * is very nice since we can now use direct DDE(s) for the input and - * the alignment is guaranteed. 
- */ inbuf = (unsigned long)in; - if (!IS_ALIGNED(inbuf, PAGE_SIZE) || inlen != PAGE_SIZE) + if (check_constraints(inbuf, &inlen, true)) + return -EINVAL; + + outbuf = (unsigned long)out; + if (check_constraints(outbuf, outlen, false)) return -EINVAL; rcu_read_lock(); @@ -344,16 +332,8 @@ static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen, max_sync_size = local_devdata->max_sync_size; dev = local_devdata->dev; - /* Create the header */ - hdr = (struct nx842_header *)out; - hdr->blocks_nr = PAGE_SIZE / max_sync_size; - hdrsize = nx842_header_size(hdr); - outbuf = (unsigned long)out + hdrsize; - bytesleft = *outlen - hdrsize; - /* Init scatterlist */ - workmem = (struct nx842_workmem *)ALIGN((unsigned long)wmem, - NX842_HW_PAGE_SIZE); + workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN); slin.entries = (struct nx842_slentry *)workmem->slin; slout.entries = (struct nx842_slentry *)workmem->slout; @@ -364,105 +344,48 @@ static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen, op.csbcpb = nx842_get_pa(csbcpb); op.out = nx842_get_pa(slout.entries); - for (i = 0; i < hdr->blocks_nr; i++) { - /* - * Aligning the output blocks to 128 bytes does waste space, - * but it prevents the need for bounce buffers and memory - * copies. It also simplifies the code a lot. In the worst - * case (64k page, 4k max_sync_size), you lose up to - * (128*16)/64k = ~3% the compression factor. For 64k - * max_sync_size, the loss would be at most 128/64k = ~0.2%. - */ - padding = ALIGN(outbuf, IO_BUFFER_ALIGN) - outbuf; - outbuf += padding; - bytesleft -= padding; - if (i == 0) - /* save offset into first block in header */ - hdr->offset = padding + hdrsize; - - if (bytesleft <= 0) { - ret = -ENOSPC; - goto unlock; - } - - /* - * NOTE: If the default max_sync_size is changed from 4k - * to 64k, remove the "likely" case below, since a - * scatterlist will always be needed. - */ - if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) { - /* Create direct DDE */ - op.in = nx842_get_pa((void *)inbuf); - op.inlen = max_sync_size; - - } else { - /* Create indirect DDE (scatterlist) */ - nx842_build_scatterlist(inbuf, max_sync_size, &slin); - op.in = nx842_get_pa(slin.entries); - op.inlen = -nx842_get_scatterlist_size(&slin); - } + if ((inbuf & NX842_HW_PAGE_MASK) == + ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) { + /* Create direct DDE */ + op.in = nx842_get_pa((void *)inbuf); + op.inlen = inlen; + } else { + /* Create indirect DDE (scatterlist) */ + nx842_build_scatterlist(inbuf, inlen, &slin); + op.in = nx842_get_pa(slin.entries); + op.inlen = -nx842_get_scatterlist_size(&slin); + } - /* - * If max_sync_size != NX842_HW_PAGE_SIZE, an indirect - * DDE is required for the outbuf. - * If max_sync_size == NX842_HW_PAGE_SIZE, outbuf must - * also be page aligned (1 in 128/4k=32 chance) in order - * to use a direct DDE. - * This is unlikely, just use an indirect DDE always. 
- */ - nx842_build_scatterlist(outbuf, - min(bytesleft, max_sync_size), &slout); - /* op.out set before loop */ + if ((outbuf & NX842_HW_PAGE_MASK) == + ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) { + /* Create direct DDE */ + op.out = nx842_get_pa((void *)outbuf); + op.outlen = *outlen; + } else { + /* Create indirect DDE (scatterlist) */ + nx842_build_scatterlist(outbuf, *outlen, &slout); + op.out = nx842_get_pa(slout.entries); op.outlen = -nx842_get_scatterlist_size(&slout); + } - /* Send request to pHyp */ - ret = vio_h_cop_sync(local_devdata->vdev, &op); - - /* Check for pHyp error */ - if (ret) { - dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n", - __func__, ret, op.hcall_err); - ret = -EIO; - goto unlock; - } - - /* Check for hardware error */ - ret = nx842_validate_result(dev, &csbcpb->csb); - if (ret && ret != -ENOSPC) - goto unlock; - - /* Handle incompressible data */ - if (unlikely(ret == -ENOSPC)) { - if (bytesleft < max_sync_size) { - /* - * Not enough space left in the output buffer - * to store uncompressed block - */ - goto unlock; - } else { - /* Store incompressible block */ - memcpy((void *)outbuf, (void *)inbuf, - max_sync_size); - hdr->sizes[i] = -max_sync_size; - outbuf += max_sync_size; - bytesleft -= max_sync_size; - /* Reset ret, incompressible data handled */ - ret = 0; - } - } else { - /* Normal case, compression was successful */ - size = csbcpb->csb.processed_byte_count; - dev_dbg(dev, "%s: processed_bytes=%d\n", - __func__, size); - hdr->sizes[i] = size; - outbuf += size; - bytesleft -= size; - } + /* Send request to pHyp */ + ret = vio_h_cop_sync(local_devdata->vdev, &op); - inbuf += max_sync_size; + /* Check for pHyp error */ + if (ret) { + dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n", + __func__, ret, op.hcall_err); + ret = -EIO; + goto unlock; } - *outlen = (unsigned int)(outbuf - (unsigned long)out); + /* Check for hardware error */ + ret = nx842_validate_result(dev, &csbcpb->csb); + if (ret) + goto unlock; + + *outlen = csbcpb->csb.processed_byte_count; + dev_dbg(dev, "%s: processed_bytes=%d\n", __func__, *outlen); unlock: if (ret) @@ -470,15 +393,12 @@ unlock: else { nx842_inc_comp_complete(local_devdata); ibm_nx842_incr_hist(local_devdata->counters->comp_times, - (get_tb() - start_time) / tb_ticks_per_usec); + (get_tb() - start) / tb_ticks_per_usec); } rcu_read_unlock(); return ret; } -static int sw842_decompress(const unsigned char *, int, unsigned char *, int *, - const void *); - /** * nx842_pseries_decompress - Decompress data using the 842 algorithm * @@ -490,11 +410,10 @@ static int sw842_decompress(const unsigned char *, int, unsigned char *, int *, * If there is an error then @outlen will be 0 and an error will be * specified by the return code from this function. 
* - * @in: Pointer to input buffer, will use bounce buffer if not 128 byte - * aligned + * @in: Pointer to input buffer * @inlen: Length of input buffer - * @out: Pointer to output buffer, must be page aligned - * @outlen: Length of output buffer, must be PAGE_SIZE + * @out: Pointer to output buffer + * @outlen: Length of output buffer * @wrkmem: ptr to buffer for working memory, size determined by * NX842_MEM_COMPRESS * @@ -510,43 +429,39 @@ static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen, unsigned char *out, unsigned int *outlen, void *wmem) { - struct nx842_header *hdr; struct nx842_devdata *local_devdata; struct device *dev = NULL; struct nx842_workmem *workmem; struct nx842_scatterlist slin, slout; struct nx_csbcpb *csbcpb; - int ret = 0, i, size, max_sync_size; + int ret = 0, max_sync_size; unsigned long inbuf, outbuf; struct vio_pfo_op op = { .done = NULL, .handle = 0, .timeout = 0, }; - unsigned long start_time = get_tb(); + unsigned long start = get_tb(); /* Ensure page alignment and size */ + inbuf = (unsigned long)in; + if (check_constraints(inbuf, &inlen, true)) + return -EINVAL; + outbuf = (unsigned long)out; - if (!IS_ALIGNED(outbuf, PAGE_SIZE) || *outlen != PAGE_SIZE) + if (check_constraints(outbuf, outlen, false)) return -EINVAL; rcu_read_lock(); local_devdata = rcu_dereference(devdata); - if (local_devdata) - dev = local_devdata->dev; - - /* Get header */ - hdr = (struct nx842_header *)in; - - workmem = (struct nx842_workmem *)ALIGN((unsigned long)wmem, - NX842_HW_PAGE_SIZE); - - inbuf = (unsigned long)in + hdr->offset; - if (likely(!IS_ALIGNED(inbuf, IO_BUFFER_ALIGN))) { - /* Copy block(s) into bounce buffer for alignment */ - memcpy(workmem->bounce, in + hdr->offset, inlen - hdr->offset); - inbuf = (unsigned long)workmem->bounce; + if (!local_devdata || !local_devdata->dev) { + rcu_read_unlock(); + return -ENODEV; } + max_sync_size = local_devdata->max_sync_size; + dev = local_devdata->dev; + + workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN); /* Init scatterlist */ slin.entries = (struct nx842_slentry *)workmem->slin; @@ -558,119 +473,55 @@ static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen, memset(csbcpb, 0, sizeof(*csbcpb)); op.csbcpb = nx842_get_pa(csbcpb); - /* - * max_sync_size may have changed since compression, - * so we can't read it from the device info. We need - * to derive it from hdr->blocks_nr. - */ - max_sync_size = PAGE_SIZE / hdr->blocks_nr; - - for (i = 0; i < hdr->blocks_nr; i++) { - /* Skip padding */ - inbuf = ALIGN(inbuf, IO_BUFFER_ALIGN); - - if (hdr->sizes[i] < 0) { - /* Negative sizes indicate uncompressed data blocks */ - size = abs(hdr->sizes[i]); - memcpy((void *)outbuf, (void *)inbuf, size); - outbuf += size; - inbuf += size; - continue; - } - - if (!dev) - goto sw; - - /* - * The better the compression, the more likely the "likely" - * case becomes. - */ - if (likely((inbuf & NX842_HW_PAGE_MASK) == - ((inbuf + hdr->sizes[i] - 1) & NX842_HW_PAGE_MASK))) { - /* Create direct DDE */ - op.in = nx842_get_pa((void *)inbuf); - op.inlen = hdr->sizes[i]; - } else { - /* Create indirect DDE (scatterlist) */ - nx842_build_scatterlist(inbuf, hdr->sizes[i] , &slin); - op.in = nx842_get_pa(slin.entries); - op.inlen = -nx842_get_scatterlist_size(&slin); - } - - /* - * NOTE: If the default max_sync_size is changed from 4k - * to 64k, remove the "likely" case below, since a - * scatterlist will always be needed. 
- */ - if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) { - /* Create direct DDE */ - op.out = nx842_get_pa((void *)outbuf); - op.outlen = max_sync_size; - } else { - /* Create indirect DDE (scatterlist) */ - nx842_build_scatterlist(outbuf, max_sync_size, &slout); - op.out = nx842_get_pa(slout.entries); - op.outlen = -nx842_get_scatterlist_size(&slout); - } - - /* Send request to pHyp */ - ret = vio_h_cop_sync(local_devdata->vdev, &op); - - /* Check for pHyp error */ - if (ret) { - dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n", - __func__, ret, op.hcall_err); - dev = NULL; - goto sw; - } + if ((inbuf & NX842_HW_PAGE_MASK) == + ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) { + /* Create direct DDE */ + op.in = nx842_get_pa((void *)inbuf); + op.inlen = inlen; + } else { + /* Create indirect DDE (scatterlist) */ + nx842_build_scatterlist(inbuf, inlen, &slin); + op.in = nx842_get_pa(slin.entries); + op.inlen = -nx842_get_scatterlist_size(&slin); + } - /* Check for hardware error */ - ret = nx842_validate_result(dev, &csbcpb->csb); - if (ret) { - dev = NULL; - goto sw; - } + if ((outbuf & NX842_HW_PAGE_MASK) == + ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) { + /* Create direct DDE */ + op.out = nx842_get_pa((void *)outbuf); + op.outlen = *outlen; + } else { + /* Create indirect DDE (scatterlist) */ + nx842_build_scatterlist(outbuf, *outlen, &slout); + op.out = nx842_get_pa(slout.entries); + op.outlen = -nx842_get_scatterlist_size(&slout); + } - /* HW decompression success */ - inbuf += hdr->sizes[i]; - outbuf += csbcpb->csb.processed_byte_count; - continue; - -sw: - /* software decompression */ - size = max_sync_size; - ret = sw842_decompress( - (unsigned char *)inbuf, hdr->sizes[i], - (unsigned char *)outbuf, &size, wmem); - if (ret) - pr_debug("%s: sw842_decompress failed with %d\n", - __func__, ret); - - if (ret) { - if (ret != -ENOSPC && ret != -EINVAL && - ret != -EMSGSIZE) - ret = -EIO; - goto unlock; - } + /* Send request to pHyp */ + ret = vio_h_cop_sync(local_devdata->vdev, &op); - /* SW decompression success */ - inbuf += hdr->sizes[i]; - outbuf += size; + /* Check for pHyp error */ + if (ret) { + dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n", + __func__, ret, op.hcall_err); + goto unlock; } - *outlen = (unsigned int)(outbuf - (unsigned long)out); + /* Check for hardware error */ + ret = nx842_validate_result(dev, &csbcpb->csb); + if (ret) + goto unlock; + + *outlen = csbcpb->csb.processed_byte_count; unlock: if (ret) /* decompress fail */ nx842_inc_decomp_failed(local_devdata); else { - if (!dev) - /* software decompress */ - nx842_inc_swdecomp(local_devdata); nx842_inc_decomp_complete(local_devdata); ibm_nx842_incr_hist(local_devdata->counters->decomp_times, - (get_tb() - start_time) / tb_ticks_per_usec); + (get_tb() - start) / tb_ticks_per_usec); } rcu_read_unlock(); @@ -829,9 +680,9 @@ static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata, maxsynccop->decomp_data_limit); devdata->max_sync_size = min_t(unsigned int, devdata->max_sync_size, - SIZE_64K); + 65536); - if (devdata->max_sync_size < SIZE_4K) { + if (devdata->max_sync_size < 4096) { dev_err(devdata->dev, "%s: hardware max data size (%u) is " "less than the driver minimum, unable to use " "the hardware device\n", @@ -1220,17 +1071,17 @@ static int __exit nx842_remove(struct vio_dev *viodev) return 0; } -static struct vio_device_id nx842_driver_ids[] = { +static struct vio_device_id nx842_vio_driver_ids[] = { {NX842_PSERIES_COMPAT_NAME "-v1", NX842_PSERIES_COMPAT_NAME}, {"", ""}, }; 
-static struct vio_driver nx842_driver = { +static struct vio_driver nx842_vio_driver = { .name = MODULE_NAME, .probe = nx842_probe, .remove = __exit_p(nx842_remove), .get_desired_dma = nx842_get_desired_dma, - .id_table = nx842_driver_ids, + .id_table = nx842_vio_driver_ids, }; static int __init nx842_init(void) @@ -1249,7 +1100,7 @@ static int __init nx842_init(void) new_devdata->status = UNAVAILABLE; RCU_INIT_POINTER(devdata, new_devdata); - return vio_register_driver(&nx842_driver); + return vio_register_driver(&nx842_vio_driver); } module_init(nx842_init); @@ -1266,336 +1117,12 @@ static void __exit nx842_exit(void) RCU_INIT_POINTER(devdata, NULL); spin_unlock_irqrestore(&devdata_mutex, flags); synchronize_rcu(); - if (old_devdata) + if (old_devdata && old_devdata->dev) dev_set_drvdata(old_devdata->dev, NULL); kfree(old_devdata); nx842_unregister_driver(&nx842_pseries_driver); - vio_unregister_driver(&nx842_driver); + vio_unregister_driver(&nx842_vio_driver); } module_exit(nx842_exit); -/********************************* - * 842 software decompressor -*********************************/ -typedef int (*sw842_template_op)(const char **, int *, unsigned char **, - struct sw842_fifo *); - -static int sw842_data8(const char **, int *, unsigned char **, - struct sw842_fifo *); -static int sw842_data4(const char **, int *, unsigned char **, - struct sw842_fifo *); -static int sw842_data2(const char **, int *, unsigned char **, - struct sw842_fifo *); -static int sw842_ptr8(const char **, int *, unsigned char **, - struct sw842_fifo *); -static int sw842_ptr4(const char **, int *, unsigned char **, - struct sw842_fifo *); -static int sw842_ptr2(const char **, int *, unsigned char **, - struct sw842_fifo *); - -/* special templates */ -#define SW842_TMPL_REPEAT 0x1B -#define SW842_TMPL_ZEROS 0x1C -#define SW842_TMPL_EOF 0x1E - -static sw842_template_op sw842_tmpl_ops[26][4] = { - { sw842_data8, NULL}, /* 0 (00000) */ - { sw842_data4, sw842_data2, sw842_ptr2, NULL}, - { sw842_data4, sw842_ptr2, sw842_data2, NULL}, - { sw842_data4, sw842_ptr2, sw842_ptr2, NULL}, - { sw842_data4, sw842_ptr4, NULL}, - { sw842_data2, sw842_ptr2, sw842_data4, NULL}, - { sw842_data2, sw842_ptr2, sw842_data2, sw842_ptr2}, - { sw842_data2, sw842_ptr2, sw842_ptr2, sw842_data2}, - { sw842_data2, sw842_ptr2, sw842_ptr2, sw842_ptr2,}, - { sw842_data2, sw842_ptr2, sw842_ptr4, NULL}, - { sw842_ptr2, sw842_data2, sw842_data4, NULL}, /* 10 (01010) */ - { sw842_ptr2, sw842_data4, sw842_ptr2, NULL}, - { sw842_ptr2, sw842_data2, sw842_ptr2, sw842_data2}, - { sw842_ptr2, sw842_data2, sw842_ptr2, sw842_ptr2}, - { sw842_ptr2, sw842_data2, sw842_ptr4, NULL}, - { sw842_ptr2, sw842_ptr2, sw842_data4, NULL}, - { sw842_ptr2, sw842_ptr2, sw842_data2, sw842_ptr2}, - { sw842_ptr2, sw842_ptr2, sw842_ptr2, sw842_data2}, - { sw842_ptr2, sw842_ptr2, sw842_ptr2, sw842_ptr2}, - { sw842_ptr2, sw842_ptr2, sw842_ptr4, NULL}, - { sw842_ptr4, sw842_data4, NULL}, /* 20 (10100) */ - { sw842_ptr4, sw842_data2, sw842_ptr2, NULL}, - { sw842_ptr4, sw842_ptr2, sw842_data2, NULL}, - { sw842_ptr4, sw842_ptr2, sw842_ptr2, NULL}, - { sw842_ptr4, sw842_ptr4, NULL}, - { sw842_ptr8, NULL} -}; - -/* Software decompress helpers */ - -static uint8_t sw842_get_byte(const char *buf, int bit) -{ - uint8_t tmpl; - uint16_t tmp; - tmp = htons(*(uint16_t *)(buf)); - tmp = (uint16_t)(tmp << bit); - tmp = ntohs(tmp); - memcpy(&tmpl, &tmp, 1); - return tmpl; -} - -static uint8_t sw842_get_template(const char **buf, int *bit) -{ - uint8_t byte; - byte = sw842_get_byte(*buf, 
*bit); - byte = byte >> 3; - byte &= 0x1F; - *buf += (*bit + 5) / 8; - *bit = (*bit + 5) % 8; - return byte; -} - -/* repeat_count happens to be 5-bit too (like the template) */ -static uint8_t sw842_get_repeat_count(const char **buf, int *bit) -{ - uint8_t byte; - byte = sw842_get_byte(*buf, *bit); - byte = byte >> 2; - byte &= 0x3F; - *buf += (*bit + 6) / 8; - *bit = (*bit + 6) % 8; - return byte; -} - -static uint8_t sw842_get_ptr2(const char **buf, int *bit) -{ - uint8_t ptr; - ptr = sw842_get_byte(*buf, *bit); - (*buf)++; - return ptr; -} - -static uint16_t sw842_get_ptr4(const char **buf, int *bit, - struct sw842_fifo *fifo) -{ - uint16_t ptr; - ptr = htons(*(uint16_t *)(*buf)); - ptr = (uint16_t)(ptr << *bit); - ptr = ptr >> 7; - ptr &= 0x01FF; - *buf += (*bit + 9) / 8; - *bit = (*bit + 9) % 8; - return ptr; -} - -static uint8_t sw842_get_ptr8(const char **buf, int *bit, - struct sw842_fifo *fifo) -{ - return sw842_get_ptr2(buf, bit); -} - -/* Software decompress template ops */ - -static int sw842_data8(const char **inbuf, int *inbit, - unsigned char **outbuf, struct sw842_fifo *fifo) -{ - int ret; - - ret = sw842_data4(inbuf, inbit, outbuf, fifo); - if (ret) - return ret; - ret = sw842_data4(inbuf, inbit, outbuf, fifo); - return ret; -} - -static int sw842_data4(const char **inbuf, int *inbit, - unsigned char **outbuf, struct sw842_fifo *fifo) -{ - int ret; - - ret = sw842_data2(inbuf, inbit, outbuf, fifo); - if (ret) - return ret; - ret = sw842_data2(inbuf, inbit, outbuf, fifo); - return ret; -} - -static int sw842_data2(const char **inbuf, int *inbit, - unsigned char **outbuf, struct sw842_fifo *fifo) -{ - **outbuf = sw842_get_byte(*inbuf, *inbit); - (*inbuf)++; - (*outbuf)++; - **outbuf = sw842_get_byte(*inbuf, *inbit); - (*inbuf)++; - (*outbuf)++; - return 0; -} - -static int sw842_ptr8(const char **inbuf, int *inbit, - unsigned char **outbuf, struct sw842_fifo *fifo) -{ - uint8_t ptr; - ptr = sw842_get_ptr8(inbuf, inbit, fifo); - if (!fifo->f84_full && (ptr >= fifo->f8_count)) - return 1; - memcpy(*outbuf, fifo->f8[ptr], 8); - *outbuf += 8; - return 0; -} - -static int sw842_ptr4(const char **inbuf, int *inbit, - unsigned char **outbuf, struct sw842_fifo *fifo) -{ - uint16_t ptr; - ptr = sw842_get_ptr4(inbuf, inbit, fifo); - if (!fifo->f84_full && (ptr >= fifo->f4_count)) - return 1; - memcpy(*outbuf, fifo->f4[ptr], 4); - *outbuf += 4; - return 0; -} - -static int sw842_ptr2(const char **inbuf, int *inbit, - unsigned char **outbuf, struct sw842_fifo *fifo) -{ - uint8_t ptr; - ptr = sw842_get_ptr2(inbuf, inbit); - if (!fifo->f2_full && (ptr >= fifo->f2_count)) - return 1; - memcpy(*outbuf, fifo->f2[ptr], 2); - *outbuf += 2; - return 0; -} - -static void sw842_copy_to_fifo(const char *buf, struct sw842_fifo *fifo) -{ - unsigned char initial_f2count = fifo->f2_count; - - memcpy(fifo->f8[fifo->f8_count], buf, 8); - fifo->f4_count += 2; - fifo->f8_count += 1; - - if (!fifo->f84_full && fifo->f4_count >= 512) { - fifo->f84_full = 1; - fifo->f4_count /= 512; - } - - memcpy(fifo->f2[fifo->f2_count++], buf, 2); - memcpy(fifo->f2[fifo->f2_count++], buf + 2, 2); - memcpy(fifo->f2[fifo->f2_count++], buf + 4, 2); - memcpy(fifo->f2[fifo->f2_count++], buf + 6, 2); - if (fifo->f2_count < initial_f2count) - fifo->f2_full = 1; -} - -static int sw842_decompress(const unsigned char *src, int srclen, - unsigned char *dst, int *destlen, - const void *wrkmem) -{ - uint8_t tmpl; - const char *inbuf; - int inbit = 0; - unsigned char *outbuf, *outbuf_end, *origbuf, *prevbuf; - const char *inbuf_end; - 
sw842_template_op op; - int opindex; - int i, repeat_count; - struct sw842_fifo *fifo; - int ret = 0; - - fifo = &((struct nx842_workmem *)(wrkmem))->swfifo; - memset(fifo, 0, sizeof(*fifo)); - - origbuf = NULL; - inbuf = src; - inbuf_end = src + srclen; - outbuf = dst; - outbuf_end = dst + *destlen; - - while ((tmpl = sw842_get_template(&inbuf, &inbit)) != SW842_TMPL_EOF) { - if (inbuf >= inbuf_end) { - ret = -EINVAL; - goto out; - } - - opindex = 0; - prevbuf = origbuf; - origbuf = outbuf; - switch (tmpl) { - case SW842_TMPL_REPEAT: - if (prevbuf == NULL) { - ret = -EINVAL; - goto out; - } - - repeat_count = sw842_get_repeat_count(&inbuf, - &inbit) + 1; - - /* Did the repeat count advance past the end of input */ - if (inbuf > inbuf_end) { - ret = -EINVAL; - goto out; - } - - for (i = 0; i < repeat_count; i++) { - /* Would this overflow the output buffer */ - if ((outbuf + 8) > outbuf_end) { - ret = -ENOSPC; - goto out; - } - - memcpy(outbuf, prevbuf, 8); - sw842_copy_to_fifo(outbuf, fifo); - outbuf += 8; - } - break; - - case SW842_TMPL_ZEROS: - /* Would this overflow the output buffer */ - if ((outbuf + 8) > outbuf_end) { - ret = -ENOSPC; - goto out; - } - - memset(outbuf, 0, 8); - sw842_copy_to_fifo(outbuf, fifo); - outbuf += 8; - break; - - default: - if (tmpl > 25) { - ret = -EINVAL; - goto out; - } - - /* Does this go past the end of the input buffer */ - if ((inbuf + 2) > inbuf_end) { - ret = -EINVAL; - goto out; - } - - /* Would this overflow the output buffer */ - if ((outbuf + 8) > outbuf_end) { - ret = -ENOSPC; - goto out; - } - - while (opindex < 4 && - (op = sw842_tmpl_ops[tmpl][opindex++]) - != NULL) { - ret = (*op)(&inbuf, &inbit, &outbuf, fifo); - if (ret) { - ret = -EINVAL; - goto out; - } - sw842_copy_to_fifo(origbuf, fifo); - } - } - } - -out: - if (!ret) - *destlen = (unsigned int)(outbuf - dst); - else - *destlen = 0; - - return ret; -} -- cgit v1.2.3 From ed70b479c2c0b6e1319f0cb2de19f1051be219a4 Mon Sep 17 00:00:00 2001 From: Dan Streetman Date: Thu, 7 May 2015 13:49:21 -0400 Subject: crypto: nx - add hardware 842 crypto comp alg Add crypto compression alg for 842 hardware compression and decompression, using the alg name "842" and driver_name "842-nx". This uses only the PowerPC coprocessor hardware for 842 compression. It also uses the hardware for decompression, but if the hardware fails it will fall back to the 842 software decompression library, so that decompression never fails (for valid 842 compressed buffers). A header must be used in most cases, due to the hardware's restrictions on the buffers being specifically aligned and sized. Due to the header this driver adds, compressed buffers it creates cannot be directly passed to the 842 software library for decompression. However, compressed buffers created by the software 842 library can be passed to this driver for hardware 842 decompression (with the exception of buffers containing the "short data" template, as lib/842/842.h explains). Signed-off-by: Dan Streetman Signed-off-by: Herbert Xu --- drivers/crypto/nx/Kconfig | 10 + drivers/crypto/nx/Makefile | 2 + drivers/crypto/nx/nx-842-crypto.c | 585 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 597 insertions(+) create mode 100644 drivers/crypto/nx/nx-842-crypto.c (limited to 'drivers') diff --git a/drivers/crypto/nx/Kconfig b/drivers/crypto/nx/Kconfig index ee9e25956241..3e621ad09675 100644 --- a/drivers/crypto/nx/Kconfig +++ b/drivers/crypto/nx/Kconfig @@ -50,4 +50,14 @@ config CRYPTO_DEV_NX_COMPRESS_POWERNV algorithm. 
This supports NX hardware on the PowerNV platform. If you choose 'M' here, this module will be called nx_compress_powernv. +config CRYPTO_DEV_NX_COMPRESS_CRYPTO + tristate "Compression acceleration cryptographic interface" + select CRYPTO_ALGAPI + select 842_DECOMPRESS + default y + help + Support for PowerPC Nest (NX) accelerators using the cryptographic + API. If you choose 'M' here, this module will be called + nx_compress_crypto. + endif diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile index 6619787423b3..868b5e630794 100644 --- a/drivers/crypto/nx/Makefile +++ b/drivers/crypto/nx/Makefile @@ -13,6 +13,8 @@ nx-crypto-objs := nx.o \ obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS) += nx-compress.o obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o +obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_CRYPTO) += nx-compress-crypto.o nx-compress-objs := nx-842.o nx-compress-pseries-objs := nx-842-pseries.o nx-compress-powernv-objs := nx-842-powernv.o +nx-compress-crypto-objs := nx-842-crypto.o diff --git a/drivers/crypto/nx/nx-842-crypto.c b/drivers/crypto/nx/nx-842-crypto.c new file mode 100644 index 000000000000..cb177c317179 --- /dev/null +++ b/drivers/crypto/nx/nx-842-crypto.c @@ -0,0 +1,585 @@ +/* + * Cryptographic API for the NX-842 hardware compression. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) IBM Corporation, 2011-2015 + * + * Original Authors: Robert Jennings + * Seth Jennings + * + * Rewrite: Dan Streetman + * + * This is an interface to the NX-842 compression hardware in PowerPC + * processors. Most of the complexity of this drvier is due to the fact that + * the NX-842 compression hardware requires the input and output data buffers + * to be specifically aligned, to be a specific multiple in length, and within + * specific minimum and maximum lengths. Those restrictions, provided by the + * nx-842 driver via nx842_constraints, mean this driver must use bounce + * buffers and headers to correct misaligned in or out buffers, and to split + * input buffers that are too large. + * + * This driver will fall back to software decompression if the hardware + * decompression fails, so this driver's decompression should never fail as + * long as the provided compressed buffer is valid. Any compressed buffer + * created by this driver will have a header (except ones where the input + * perfectly matches the constraints); so users of this driver cannot simply + * pass a compressed buffer created by this driver over to the 842 software + * decompression library. Instead, users must use this driver to decompress; + * if the hardware fails or is unavailable, the compressed buffer will be + * parsed and the header removed, and the raw 842 buffer(s) passed to the 842 + * software decompression library. 
+ * + * This does not fall back to software compression, however, since the caller + * of this function is specifically requesting hardware compression; if the + * hardware compression fails, the caller can fall back to software + * compression, and the raw 842 compressed buffer that the software compressor + * creates can be passed to this driver for hardware decompression; any + * buffer without our specific header magic is assumed to be a raw 842 buffer + * and passed directly to the hardware. Note that the software compression + * library will produce a compressed buffer that is incompatible with the + * hardware decompressor if the original input buffer length is not a multiple + * of 8; if such a compressed buffer is passed to this driver for + * decompression, the hardware will reject it and this driver will then pass + * it over to the software library for decompression. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include + +/* The first 5 bits of this magic are 0x1f, which is an invalid 842 5-bit + * template (see lib/842/842.h), so this magic number will never appear at + * the start of a raw 842 compressed buffer. That is important, as any buffer + * passed to us without this magic is assumed to be a raw 842 compressed + * buffer, and passed directly to the hardware to decompress. + */ +#define NX842_CRYPTO_MAGIC (0xf842) +#define NX842_CRYPTO_GROUP_MAX (0x20) +#define NX842_CRYPTO_HEADER_SIZE(g) \ + (sizeof(struct nx842_crypto_header) + \ + sizeof(struct nx842_crypto_header_group) * (g)) +#define NX842_CRYPTO_HEADER_MAX_SIZE \ + NX842_CRYPTO_HEADER_SIZE(NX842_CRYPTO_GROUP_MAX) + +/* bounce buffer size */ +#define BOUNCE_BUFFER_ORDER (2) +#define BOUNCE_BUFFER_SIZE \ + ((unsigned int)(PAGE_SIZE << BOUNCE_BUFFER_ORDER)) + +/* try longer on comp because we can fallback to sw decomp if hw is busy */ +#define COMP_BUSY_TIMEOUT (250) /* ms */ +#define DECOMP_BUSY_TIMEOUT (50) /* ms */ + +struct nx842_crypto_header_group { + __be16 padding; /* unused bytes at start of group */ + __be32 compressed_length; /* compressed bytes in group */ + __be32 uncompressed_length; /* bytes after decompression */ +} __packed; + +struct nx842_crypto_header { + __be16 magic; /* NX842_CRYPTO_MAGIC */ + __be16 ignore; /* decompressed end bytes to ignore */ + u8 groups; /* total groups in this header */ + struct nx842_crypto_header_group group[]; +} __packed; + +struct nx842_crypto_param { + u8 *in; + unsigned int iremain; + u8 *out; + unsigned int oremain; + unsigned int ototal; +}; + +static int update_param(struct nx842_crypto_param *p, + unsigned int slen, unsigned int dlen) +{ + if (p->iremain < slen) + return -EOVERFLOW; + if (p->oremain < dlen) + return -ENOSPC; + + p->in += slen; + p->iremain -= slen; + p->out += dlen; + p->oremain -= dlen; + p->ototal += dlen; + + return 0; +} + +struct nx842_crypto_ctx { + u8 *wmem; + u8 *sbounce, *dbounce; + + struct nx842_crypto_header header; + struct nx842_crypto_header_group group[NX842_CRYPTO_GROUP_MAX]; +}; + +static int nx842_crypto_init(struct crypto_tfm *tfm) +{ + struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + + ctx->wmem = kmalloc(NX842_MEM_COMPRESS, GFP_KERNEL); + ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER); + ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER); + if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) { + kfree(ctx->wmem); + free_page((unsigned long)ctx->sbounce); + free_page((unsigned long)ctx->dbounce); + return 
-ENOMEM; + } + + return 0; +} + +static void nx842_crypto_exit(struct crypto_tfm *tfm) +{ + struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + + kfree(ctx->wmem); + free_page((unsigned long)ctx->sbounce); + free_page((unsigned long)ctx->dbounce); +} + +static int read_constraints(struct nx842_constraints *c) +{ + int ret; + + ret = nx842_constraints(c); + if (ret) { + pr_err_ratelimited("could not get nx842 constraints : %d\n", + ret); + return ret; + } + + /* limit maximum, to always have enough bounce buffer to decompress */ + if (c->maximum > BOUNCE_BUFFER_SIZE) { + c->maximum = BOUNCE_BUFFER_SIZE; + pr_info_once("limiting nx842 maximum to %x\n", c->maximum); + } + + return 0; +} + +static int nx842_crypto_add_header(struct nx842_crypto_header *hdr, u8 *buf) +{ + int s = NX842_CRYPTO_HEADER_SIZE(hdr->groups); + + /* compress should have added space for header */ + if (s > be16_to_cpu(hdr->group[0].padding)) { + pr_err("Internal error: no space for header\n"); + return -EINVAL; + } + + memcpy(buf, hdr, s); + + print_hex_dump_debug("header ", DUMP_PREFIX_OFFSET, 16, 1, buf, s, 0); + + return 0; +} + +static int compress(struct nx842_crypto_ctx *ctx, + struct nx842_crypto_param *p, + struct nx842_crypto_header_group *g, + struct nx842_constraints *c, + u16 *ignore, + unsigned int hdrsize) +{ + unsigned int slen = p->iremain, dlen = p->oremain, tmplen; + unsigned int adj_slen = slen; + u8 *src = p->in, *dst = p->out; + int ret, dskip = 0; + ktime_t timeout; + + if (p->iremain == 0) + return -EOVERFLOW; + + if (p->oremain == 0 || hdrsize + c->minimum > dlen) + return -ENOSPC; + + if (slen % c->multiple) + adj_slen = round_up(slen, c->multiple); + if (slen < c->minimum) + adj_slen = c->minimum; + if (slen > c->maximum) + adj_slen = slen = c->maximum; + if (adj_slen > slen || (u64)src % c->alignment) { + adj_slen = min(adj_slen, BOUNCE_BUFFER_SIZE); + slen = min(slen, BOUNCE_BUFFER_SIZE); + if (adj_slen > slen) + memset(ctx->sbounce + slen, 0, adj_slen - slen); + memcpy(ctx->sbounce, src, slen); + src = ctx->sbounce; + slen = adj_slen; + pr_debug("using comp sbounce buffer, len %x\n", slen); + } + + dst += hdrsize; + dlen -= hdrsize; + + if ((u64)dst % c->alignment) { + dskip = (int)(PTR_ALIGN(dst, c->alignment) - dst); + dst += dskip; + dlen -= dskip; + } + if (dlen % c->multiple) + dlen = round_down(dlen, c->multiple); + if (dlen < c->minimum) { +nospc: + dst = ctx->dbounce; + dlen = min(p->oremain, BOUNCE_BUFFER_SIZE); + dlen = round_down(dlen, c->multiple); + dskip = 0; + pr_debug("using comp dbounce buffer, len %x\n", dlen); + } + if (dlen > c->maximum) + dlen = c->maximum; + + tmplen = dlen; + timeout = ktime_add_ms(ktime_get(), COMP_BUSY_TIMEOUT); + do { + dlen = tmplen; /* reset dlen, if we're retrying */ + ret = nx842_compress(src, slen, dst, &dlen, ctx->wmem); + /* possibly we should reduce the slen here, instead of + * retrying with the dbounce buffer? 
+ */ + if (ret == -ENOSPC && dst != ctx->dbounce) + goto nospc; + } while (ret == -EBUSY && ktime_before(ktime_get(), timeout)); + if (ret) + return ret; + + dskip += hdrsize; + + if (dst == ctx->dbounce) + memcpy(p->out + dskip, dst, dlen); + + g->padding = cpu_to_be16(dskip); + g->compressed_length = cpu_to_be32(dlen); + g->uncompressed_length = cpu_to_be32(slen); + + if (p->iremain < slen) { + *ignore = slen - p->iremain; + slen = p->iremain; + } + + pr_debug("compress slen %x ignore %x dlen %x padding %x\n", + slen, *ignore, dlen, dskip); + + return update_param(p, slen, dskip + dlen); +} + +static int nx842_crypto_compress(struct crypto_tfm *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen) +{ + struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct nx842_crypto_header *hdr = &ctx->header; + struct nx842_crypto_param p; + struct nx842_constraints c; + unsigned int groups, hdrsize, h; + int ret, n; + bool add_header; + u16 ignore = 0; + + if (!tfm || !src || !slen || !dst || !dlen) + return -EINVAL; + + p.in = (u8 *)src; + p.iremain = slen; + p.out = dst; + p.oremain = *dlen; + p.ototal = 0; + + *dlen = 0; + + ret = read_constraints(&c); + if (ret) + return ret; + + groups = min_t(unsigned int, NX842_CRYPTO_GROUP_MAX, + DIV_ROUND_UP(p.iremain, c.maximum)); + hdrsize = NX842_CRYPTO_HEADER_SIZE(groups); + + /* skip adding header if the buffers meet all constraints */ + add_header = (p.iremain % c.multiple || + p.iremain < c.minimum || + p.iremain > c.maximum || + (u64)p.in % c.alignment || + p.oremain % c.multiple || + p.oremain < c.minimum || + p.oremain > c.maximum || + (u64)p.out % c.alignment); + + hdr->magic = cpu_to_be16(NX842_CRYPTO_MAGIC); + hdr->groups = 0; + hdr->ignore = 0; + + while (p.iremain > 0) { + n = hdr->groups++; + if (hdr->groups > NX842_CRYPTO_GROUP_MAX) + return -ENOSPC; + + /* header goes before first group */ + h = !n && add_header ? 
hdrsize : 0; + + if (ignore) + pr_warn("interal error, ignore is set %x\n", ignore); + + ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h); + if (ret) + return ret; + } + + if (!add_header && hdr->groups > 1) { + pr_err("Internal error: No header but multiple groups\n"); + return -EINVAL; + } + + /* ignore indicates the input stream needed to be padded */ + hdr->ignore = cpu_to_be16(ignore); + if (ignore) + pr_debug("marked %d bytes as ignore\n", ignore); + + if (add_header) + ret = nx842_crypto_add_header(hdr, dst); + if (ret) + return ret; + + *dlen = p.ototal; + + pr_debug("compress total slen %x dlen %x\n", slen, *dlen); + + return 0; +} + +static int decompress(struct nx842_crypto_ctx *ctx, + struct nx842_crypto_param *p, + struct nx842_crypto_header_group *g, + struct nx842_constraints *c, + u16 ignore, + bool usehw) +{ + unsigned int slen = be32_to_cpu(g->compressed_length); + unsigned int required_len = be32_to_cpu(g->uncompressed_length); + unsigned int dlen = p->oremain, tmplen; + unsigned int adj_slen = slen; + u8 *src = p->in, *dst = p->out; + u16 padding = be16_to_cpu(g->padding); + int ret, spadding = 0, dpadding = 0; + ktime_t timeout; + + if (!slen || !required_len) + return -EINVAL; + + if (p->iremain <= 0 || padding + slen > p->iremain) + return -EOVERFLOW; + + if (p->oremain <= 0 || required_len - ignore > p->oremain) + return -ENOSPC; + + src += padding; + + if (!usehw) + goto usesw; + + if (slen % c->multiple) + adj_slen = round_up(slen, c->multiple); + if (slen < c->minimum) + adj_slen = c->minimum; + if (slen > c->maximum) + goto usesw; + if (slen < adj_slen || (u64)src % c->alignment) { + /* we can append padding bytes because the 842 format defines + * an "end" template (see lib/842/842_decompress.c) and will + * ignore any bytes following it. 
+ */ + if (slen < adj_slen) + memset(ctx->sbounce + slen, 0, adj_slen - slen); + memcpy(ctx->sbounce, src, slen); + src = ctx->sbounce; + spadding = adj_slen - slen; + slen = adj_slen; + pr_debug("using decomp sbounce buffer, len %x\n", slen); + } + + if (dlen % c->multiple) + dlen = round_down(dlen, c->multiple); + if (dlen < required_len || (u64)dst % c->alignment) { + dst = ctx->dbounce; + dlen = min(required_len, BOUNCE_BUFFER_SIZE); + pr_debug("using decomp dbounce buffer, len %x\n", dlen); + } + if (dlen < c->minimum) + goto usesw; + if (dlen > c->maximum) + dlen = c->maximum; + + tmplen = dlen; + timeout = ktime_add_ms(ktime_get(), DECOMP_BUSY_TIMEOUT); + do { + dlen = tmplen; /* reset dlen, if we're retrying */ + ret = nx842_decompress(src, slen, dst, &dlen, ctx->wmem); + } while (ret == -EBUSY && ktime_before(ktime_get(), timeout)); + if (ret) { +usesw: + /* reset everything, sw doesn't have constraints */ + src = p->in + padding; + slen = be32_to_cpu(g->compressed_length); + spadding = 0; + dst = p->out; + dlen = p->oremain; + dpadding = 0; + if (dlen < required_len) { /* have ignore bytes */ + dst = ctx->dbounce; + dlen = BOUNCE_BUFFER_SIZE; + } + pr_info_ratelimited("using software 842 decompression\n"); + ret = sw842_decompress(src, slen, dst, &dlen); + } + if (ret) + return ret; + + slen -= spadding; + + dlen -= ignore; + if (ignore) + pr_debug("ignoring last %x bytes\n", ignore); + + if (dst == ctx->dbounce) + memcpy(p->out, dst, dlen); + + pr_debug("decompress slen %x padding %x dlen %x ignore %x\n", + slen, padding, dlen, ignore); + + return update_param(p, slen + padding, dlen); +} + +static int nx842_crypto_decompress(struct crypto_tfm *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen) +{ + struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct nx842_crypto_header *hdr; + struct nx842_crypto_param p; + struct nx842_constraints c; + int n, ret, hdr_len; + u16 ignore = 0; + bool usehw = true; + + if (!tfm || !src || !slen || !dst || !dlen) + return -EINVAL; + + p.in = (u8 *)src; + p.iremain = slen; + p.out = dst; + p.oremain = *dlen; + p.ototal = 0; + + *dlen = 0; + + if (read_constraints(&c)) + usehw = false; + + hdr = (struct nx842_crypto_header *)src; + + /* If it doesn't start with our header magic number, assume it's a raw + * 842 compressed buffer and pass it directly to the hardware driver + */ + if (be16_to_cpu(hdr->magic) != NX842_CRYPTO_MAGIC) { + struct nx842_crypto_header_group g = { + .padding = 0, + .compressed_length = cpu_to_be32(p.iremain), + .uncompressed_length = cpu_to_be32(p.oremain), + }; + + ret = decompress(ctx, &p, &g, &c, 0, usehw); + if (ret) + return ret; + + *dlen = p.ototal; + + return 0; + } + + if (!hdr->groups) { + pr_err("header has no groups\n"); + return -EINVAL; + } + if (hdr->groups > NX842_CRYPTO_GROUP_MAX) { + pr_err("header has too many groups %x, max %x\n", + hdr->groups, NX842_CRYPTO_GROUP_MAX); + return -EINVAL; + } + + hdr_len = NX842_CRYPTO_HEADER_SIZE(hdr->groups); + if (hdr_len > slen) + return -EOVERFLOW; + + memcpy(&ctx->header, src, hdr_len); + hdr = &ctx->header; + + for (n = 0; n < hdr->groups; n++) { + /* ignore applies to last group */ + if (n + 1 == hdr->groups) + ignore = be16_to_cpu(hdr->ignore); + + ret = decompress(ctx, &p, &hdr->group[n], &c, ignore, usehw); + if (ret) + return ret; + } + + *dlen = p.ototal; + + pr_debug("decompress total slen %x dlen %x\n", slen, *dlen); + + return 0; +} + +static struct crypto_alg alg = { + .cra_name = "842", + .cra_driver_name = "842-nx", + 
.cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, + .cra_ctxsize = sizeof(struct nx842_crypto_ctx), + .cra_module = THIS_MODULE, + .cra_init = nx842_crypto_init, + .cra_exit = nx842_crypto_exit, + .cra_u = { .compress = { + .coa_compress = nx842_crypto_compress, + .coa_decompress = nx842_crypto_decompress } } +}; + +static int __init nx842_crypto_mod_init(void) +{ + return crypto_register_alg(&alg); +} +module_init(nx842_crypto_mod_init); + +static void __exit nx842_crypto_mod_exit(void) +{ + crypto_unregister_alg(&alg); +} +module_exit(nx842_crypto_mod_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("IBM PowerPC Nest (NX) 842 Hardware Compression Interface"); +MODULE_ALIAS_CRYPTO("842"); +MODULE_ALIAS_CRYPTO("842-nx"); +MODULE_AUTHOR("Dan Streetman "); -- cgit v1.2.3 From 551d7ed2fdee1fb7bba2dbd55b61e821a10a7c51 Mon Sep 17 00:00:00 2001 From: "Allan, Bruce W" Date: Thu, 7 May 2015 17:00:42 -0700 Subject: crypto: qat - add driver version Signed-off-by: Bruce Allan Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/adf_common_drv.h | 7 +++++++ drivers/crypto/qat/qat_common/adf_ctl_drv.c | 1 + drivers/crypto/qat/qat_dh895xcc/adf_drv.c | 1 + 3 files changed, 9 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index 0666ee6a3360..27e16c09230b 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h @@ -53,6 +53,13 @@ #include "icp_qat_fw_loader_handle.h" #include "icp_qat_hal.h" +#define ADF_MAJOR_VERSION 0 +#define ADF_MINOR_VERSION 1 +#define ADF_BUILD_VERSION 3 +#define ADF_DRV_VERSION __stringify(ADF_MAJOR_VERSION) "." \ + __stringify(ADF_MINOR_VERSION) "." \ + __stringify(ADF_BUILD_VERSION) + #define ADF_STATUS_RESTARTING 0 #define ADF_STATUS_STARTING 1 #define ADF_STATUS_CONFIGURED 2 diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c index cb5f066e93a6..e056b9e9bf8a 100644 --- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c +++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c @@ -504,3 +504,4 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Intel"); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_ALIAS_CRYPTO("intel_qat"); +MODULE_VERSION(ADF_DRV_VERSION); diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c index c1b3f0b342e2..ecf0ef1fcda8 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c @@ -419,3 +419,4 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Intel"); MODULE_FIRMWARE(ADF_DH895XCC_FW); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); +MODULE_VERSION(ADF_DRV_VERSION); -- cgit v1.2.3 From b579d42672b41dc7f1641df7790dfd906fee7aa8 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 11 May 2015 17:47:43 +0800 Subject: crypto: caam - Include internal/aead.h All AEAD implementations must include internal/aead.h in order to access required helpers. 
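In each of the drivers touched by this series of include fixups the change is a one-line swap from the public AEAD header to the internal one, giving the driver access to the helpers mentioned above. Roughly, it is (the added line follows from the patch subject; the removed line is presumed to be the public crypto/aead.h include and may differ per driver):

-#include <crypto/aead.h>            /* presumed prior include */
+#include <crypto/internal/aead.h>   /* per the patch subject */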
Signed-off-by: Herbert Xu --- drivers/crypto/caam/compat.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h index acd7743e2603..f57f395db33f 100644 --- a/drivers/crypto/caam/compat.h +++ b/drivers/crypto/caam/compat.h @@ -32,7 +32,7 @@ #include #include #include -#include +#include #include #include #include -- cgit v1.2.3 From 5290b4283098820a3d28cd1bf72730a9bdeef031 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 11 May 2015 17:47:44 +0800 Subject: crypto: ixp4xx - Include internal/aead.h All AEAD implementations must include internal/aead.h in order to access required helpers. Signed-off-by: Herbert Xu --- drivers/crypto/ixp4xx_crypto.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 48f453555f1f..a8875b48759e 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include #include -- cgit v1.2.3 From 2d78db0935d183fcc1be02fe76d86e1594d99195 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 11 May 2015 17:47:45 +0800 Subject: crypto: nx - Include internal/aead.h All AEAD implementations must include internal/aead.h in order to access required helpers. Signed-off-by: Herbert Xu --- drivers/crypto/picoxcell_crypto.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index 5da5b98b8f29..135817fd7b2d 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -15,7 +15,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#include +#include #include #include #include -- cgit v1.2.3 From 0ed6264b60fc5b59dc0dc26a29139d9dcd759a67 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 11 May 2015 17:47:46 +0800 Subject: crypto: qat - Include internal/aead.h All AEAD implementations must include internal/aead.h in order to access required helpers. Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/qat_algs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 1dc5b0a17cf7..070abcd0f2b3 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -47,7 +47,7 @@ #include #include #include -#include +#include #include #include #include -- cgit v1.2.3 From e98014abd84884ba0c6db1320de87fac1529b47e Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 11 May 2015 17:47:48 +0800 Subject: crypto: talitos - Include internal/aead.h All AEAD implementations must include internal/aead.h in order to access required helpers. 
Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index c04074d08461..4c0778b03de6 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -46,7 +46,7 @@ #include #include #include -#include +#include #include #include #include -- cgit v1.2.3 From add86d55768e5e301be38b2961df35f26f9d5fbe Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 11 May 2015 17:47:50 +0800 Subject: crypto: caam - Remove unnecessary reference to crt_aead crt_aead is an internal implementation detail and must not be used outside of the crypto API itself. This patch replaces the unnecessary uses of crt_aead with crypto_aead_ivsize. Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 96 +++++++++++++++++++++---------------------- 1 file changed, 48 insertions(+), 48 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 3c025d407aff..a34fc95ab8dc 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -258,7 +258,7 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx, static int aead_null_set_sh_desc(struct crypto_aead *aead) { - struct aead_tfm *tfm = &aead->base.crt_aead; + unsigned int ivsize = crypto_aead_ivsize(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; bool keys_fit_inline = false; @@ -383,7 +383,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) /* assoclen + cryptlen = seqinlen - ivsize - authsize */ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, - ctx->authsize + tfm->ivsize); + ctx->authsize + ivsize); /* assoclen = (assoclen + cryptlen) - cryptlen */ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); @@ -449,7 +449,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) static int aead_set_sh_desc(struct crypto_aead *aead) { - struct aead_tfm *tfm = &aead->base.crt_aead; + unsigned int ivsize = crypto_aead_ivsize(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead); struct crypto_tfm *ctfm = crypto_aead_tfm(aead); const char *alg_name = crypto_tfm_alg_name(ctfm); @@ -510,7 +510,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); /* assoclen + cryptlen = seqinlen - ivsize */ - append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize); + append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, ivsize); /* assoclen = (assoclen + cryptlen) - cryptlen */ append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ); @@ -518,7 +518,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) /* read assoc before reading payload */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | KEY_VLF); - aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off); + aead_append_ld_iv(desc, ivsize, ctx1_iv_off); /* Load Counter into CONTEXT1 reg */ if (is_rfc3686) @@ -577,7 +577,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) /* assoclen + cryptlen = seqinlen - ivsize - authsize */ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, - ctx->authsize + tfm->ivsize); + ctx->authsize + ivsize); /* assoclen = (assoclen + cryptlen) - cryptlen */ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); @@ -586,7 +586,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) 
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | KEY_VLF); - aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off); + aead_append_ld_iv(desc, ivsize, ctx1_iv_off); /* Load Counter into CONTEXT1 reg */ if (is_rfc3686) @@ -645,20 +645,20 @@ static int aead_set_sh_desc(struct crypto_aead *aead) /* Generate IV */ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | - NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT); + NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT); append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX | (ctx1_iv_off << MOVE_OFFSET_SHIFT) | - (tfm->ivsize << MOVE_LEN_SHIFT)); + (ivsize << MOVE_LEN_SHIFT)); append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); /* Copy IV to class 1 context */ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | (ctx1_iv_off << MOVE_OFFSET_SHIFT) | - (tfm->ivsize << MOVE_LEN_SHIFT)); + (ivsize << MOVE_LEN_SHIFT)); /* Return to encryption */ append_operation(desc, ctx->class2_alg_type | @@ -676,10 +676,10 @@ static int aead_set_sh_desc(struct crypto_aead *aead) /* Copy iv from outfifo to class 2 fifo */ moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 | - NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT); + NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT); append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); - append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB | + append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM); /* Load Counter into CONTEXT1 reg */ @@ -698,7 +698,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); /* Not need to reload iv */ - append_seq_fifo_load(desc, tfm->ivsize, + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP); /* Will read cryptlen */ @@ -738,7 +738,7 @@ static int aead_setauthsize(struct crypto_aead *authenc, static int gcm_set_sh_desc(struct crypto_aead *aead) { - struct aead_tfm *tfm = &aead->base.crt_aead; + unsigned int ivsize = crypto_aead_ivsize(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; bool keys_fit_inline = false; @@ -781,7 +781,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); /* assoclen + cryptlen = seqinlen - ivsize */ - append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize); + append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, ivsize); /* assoclen = (assoclen + cryptlen) - cryptlen */ append_math_sub(desc, REG1, REG2, REG3, CAAM_CMD_SZ); @@ -791,7 +791,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_MATH_Z); /* read IV */ - append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); /* if assoclen is ZERO, skip reading the assoc data */ @@ -824,7 +824,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_MATH_Z); /* read IV */ - append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); /* read assoc 
data */ @@ -836,7 +836,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) /* read IV - is the only input data */ set_jump_tgt_here(desc, zero_assoc_jump_cmd2); - append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | FIFOLD_TYPE_LAST1); @@ -888,14 +888,14 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) /* assoclen + cryptlen = seqinlen - ivsize - icvsize */ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, - ctx->authsize + tfm->ivsize); + ctx->authsize + ivsize); /* assoclen = (assoclen + cryptlen) - cryptlen */ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); append_math_sub(desc, REG1, REG3, REG2, CAAM_CMD_SZ); /* read IV */ - append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); /* jump to zero-payload command if cryptlen is zero */ @@ -968,7 +968,7 @@ static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) static int rfc4106_set_sh_desc(struct crypto_aead *aead) { - struct aead_tfm *tfm = &aead->base.crt_aead; + unsigned int ivsize = crypto_aead_ivsize(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; bool keys_fit_inline = false; @@ -1012,7 +1012,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); /* assoclen + cryptlen = seqinlen - ivsize */ - append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize); + append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, ivsize); /* assoclen = (assoclen + cryptlen) - cryptlen */ append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ); @@ -1021,7 +1021,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen), 4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV); /* Read AES-GCM-ESP IV */ - append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); /* Read assoc data */ @@ -1085,7 +1085,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) /* assoclen + cryptlen = seqinlen - ivsize - icvsize */ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, - ctx->authsize + tfm->ivsize); + ctx->authsize + ivsize); /* assoclen = (assoclen + cryptlen) - cryptlen */ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); @@ -1098,7 +1098,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen), 4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV); /* Read AES-GCM-ESP IV */ - append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); /* Read assoc data */ @@ -1161,17 +1161,17 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) /* Generate IV */ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | - NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT); + NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT); append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); move_cmd = append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_DESCBUF | - (tfm->ivsize << MOVE_LEN_SHIFT)); + (ivsize << MOVE_LEN_SHIFT)); append_cmd(desc, CMD_LOAD | 
ENABLE_AUTO_INFO_FIFO); /* Copy generated IV to OFIFO */ write_iv_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_OUTFIFO | - (tfm->ivsize << MOVE_LEN_SHIFT)); + (ivsize << MOVE_LEN_SHIFT)); /* Class 1 operation */ append_operation(desc, ctx->class1_alg_type | @@ -1199,7 +1199,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) /* End of blank commands */ /* No need to reload iv */ - append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP); + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP); /* Read assoc data */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | @@ -1249,7 +1249,7 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc, static int rfc4543_set_sh_desc(struct crypto_aead *aead) { - struct aead_tfm *tfm = &aead->base.crt_aead; + unsigned int ivsize = crypto_aead_ivsize(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; bool keys_fit_inline = false; @@ -1291,7 +1291,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) /* Load AES-GMAC ESP IV into Math1 register */ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 | - LDST_CLASS_DECO | tfm->ivsize); + LDST_CLASS_DECO | ivsize); /* Wait the DMA transaction to finish */ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | @@ -1299,11 +1299,11 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) /* Overwrite blank immediate AES-GMAC ESP IV data */ write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | - (tfm->ivsize << MOVE_LEN_SHIFT)); + (ivsize << MOVE_LEN_SHIFT)); /* Overwrite blank immediate AAD data */ write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | - (tfm->ivsize << MOVE_LEN_SHIFT)); + (ivsize << MOVE_LEN_SHIFT)); /* cryptlen = seqoutlen - authsize */ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); @@ -1313,7 +1313,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) /* Read Salt and AES-GMAC ESP IV */ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | - FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize)); + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + ivsize)); /* Append Salt */ append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4); set_move_tgt_here(desc, write_iv_cmd); @@ -1344,7 +1344,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) /* Authenticate AES-GMAC ESP IV */ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | - FIFOLD_TYPE_AAD | tfm->ivsize); + FIFOLD_TYPE_AAD | ivsize); set_move_tgt_here(desc, write_aad_cmd); /* Blank commands. Will be overwritten by AES-GMAC ESP IV. 
*/ append_cmd(desc, 0x00000000); @@ -1407,7 +1407,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) /* Load AES-GMAC ESP IV into Math1 register */ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 | - LDST_CLASS_DECO | tfm->ivsize); + LDST_CLASS_DECO | ivsize); /* Wait the DMA transaction to finish */ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | @@ -1418,11 +1418,11 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) /* Overwrite blank immediate AES-GMAC ESP IV data */ write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | - (tfm->ivsize << MOVE_LEN_SHIFT)); + (ivsize << MOVE_LEN_SHIFT)); /* Overwrite blank immediate AAD data */ write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | - (tfm->ivsize << MOVE_LEN_SHIFT)); + (ivsize << MOVE_LEN_SHIFT)); /* assoclen = (assoclen + cryptlen) - cryptlen */ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); @@ -1440,7 +1440,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) /* Read Salt and AES-GMAC ESP IV */ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | - FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize)); + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + ivsize)); /* Append Salt */ append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4); set_move_tgt_here(desc, write_iv_cmd); @@ -1461,7 +1461,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) /* Authenticate AES-GMAC ESP IV */ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | - FIFOLD_TYPE_AAD | tfm->ivsize); + FIFOLD_TYPE_AAD | ivsize); set_move_tgt_here(desc, write_aad_cmd); /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */ append_cmd(desc, 0x00000000); @@ -1527,26 +1527,26 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) /* Generate IV */ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | - NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT); + NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT); append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); /* Move generated IV to Math1 register */ append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_MATH1 | - (tfm->ivsize << MOVE_LEN_SHIFT)); + (ivsize << MOVE_LEN_SHIFT)); append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); /* Overwrite blank immediate AES-GMAC IV data */ write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | - (tfm->ivsize << MOVE_LEN_SHIFT)); + (ivsize << MOVE_LEN_SHIFT)); /* Overwrite blank immediate AAD data */ write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | - (tfm->ivsize << MOVE_LEN_SHIFT)); + (ivsize << MOVE_LEN_SHIFT)); /* Copy generated IV to OFIFO */ append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_OUTFIFO | - (tfm->ivsize << MOVE_LEN_SHIFT)); + (ivsize << MOVE_LEN_SHIFT)); /* Class 1 operation */ append_operation(desc, ctx->class1_alg_type | @@ -1573,7 +1573,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) /* Read Salt and AES-GMAC generated IV */ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | - FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize)); + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + ivsize)); /* Append Salt */ append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4); set_move_tgt_here(desc, write_iv_cmd); @@ -1583,7 +1583,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) /* End of blank commands */ /* No need to reload iv */ - 
append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP); + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP); /* Read assoc data */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | @@ -1594,7 +1594,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) /* Authenticate AES-GMAC IV */ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | - FIFOLD_TYPE_AAD | tfm->ivsize); + FIFOLD_TYPE_AAD | ivsize); set_move_tgt_here(desc, write_aad_cmd); /* Blank commands. Will be overwritten by AES-GMAC IV. */ append_cmd(desc, 0x00000000); -- cgit v1.2.3 From 71b311d6108ecc69bf6034e03cb6bdf09335dd2c Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 11 May 2015 17:48:06 +0800 Subject: crypto: ixp4xx - Use crypto_aead_set_reqsize helper This patch uses the crypto_aead_set_reqsize helper to avoid directly touching the internals of aead. Signed-off-by: Herbert Xu --- drivers/crypto/ixp4xx_crypto.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index a8875b48759e..46cd6966dc45 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -575,7 +575,8 @@ static int init_tfm_ablk(struct crypto_tfm *tfm) static int init_tfm_aead(struct crypto_tfm *tfm) { - tfm->crt_aead.reqsize = sizeof(struct aead_ctx); + crypto_aead_set_reqsize(__crypto_aead_cast(tfm), + sizeof(struct aead_ctx)); return init_tfm(tfm); } -- cgit v1.2.3 From 9611ef63c27709f57639ab49fe3977c5947038a5 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 11 May 2015 17:48:08 +0800 Subject: crypto: picoxcell - Use crypto_aead_set_reqsize helper This patch uses the crypto_aead_set_reqsize helper to avoid directly touching the internals of aead. Signed-off-by: Herbert Xu --- drivers/crypto/picoxcell_crypto.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index 135817fd7b2d..eb2a0ca49eda 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -790,7 +790,8 @@ static int spacc_aead_cra_init(struct crypto_tfm *tfm) get_random_bytes(ctx->salt, sizeof(ctx->salt)); - tfm->crt_aead.reqsize = sizeof(struct spacc_req); + crypto_aead_set_reqsize(__crypto_aead_cast(tfm), + sizeof(struct spacc_req)); return 0; } -- cgit v1.2.3 From 97cacb9f7a1e7502e7e8e7964b00dc191be565eb Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 11 May 2015 17:48:09 +0800 Subject: crypto: qat - Use crypto_aead_set_reqsize helper This patch uses the crypto_aead_set_reqsize helper to avoid directly touching the internals of aead. 
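For reference, the ixp4xx, picoxcell and qat conversions in this series all follow the same pattern; a minimal illustrative sketch, with a placeholder context structure and assuming the helper is declared in crypto/internal/aead.h:

#include <crypto/internal/aead.h>

/* illustrative only -- example_aead_req_ctx is a made-up placeholder */
static int example_aead_init(struct crypto_tfm *tfm)
{
	/* replaces a direct write to tfm->crt_aead.reqsize */
	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
				sizeof(struct example_aead_req_ctx));
	return 0;
}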
Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/qat_algs.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 070abcd0f2b3..dc231b860a1e 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -1094,8 +1094,9 @@ static int qat_alg_aead_init(struct crypto_tfm *tfm, return -EFAULT; spin_lock_init(&ctx->lock); ctx->qat_hash_alg = hash; - tfm->crt_aead.reqsize = sizeof(struct aead_request) + - sizeof(struct qat_crypto_request); + crypto_aead_set_reqsize(__crypto_aead_cast(tfm), + sizeof(struct aead_request) + + sizeof(struct qat_crypto_request)); ctx->tfm = tfm; return 0; } -- cgit v1.2.3 From 608f37d0f83e4359f48121949778bf912cd56800 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 11 May 2015 13:10:09 +0300 Subject: crypto: talitos - fix size calculation in talitos_edesc_alloc() The + operation has higher precedence than ?: so we need parentheses here. Otherwise we may end up allocating a max of only one "cryptlen" instead of two. Fixes: 6f65f6ac5fb3 ('crypto: talitos - implement scatter/gather copy for SEC1') Signed-off-by: Dan Carpenter Acked-by: Christophe Leroy Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 4c0778b03de6..aaa7af856b39 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1335,8 +1335,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, alloc_len = sizeof(struct talitos_edesc); if (assoc_nents || src_nents || dst_nents) { if (is_sec1) - dma_len = src_nents ? cryptlen : 0 + - dst_nents ? cryptlen : 0; + dma_len = (src_nents ? cryptlen : 0) + + (dst_nents ? cryptlen : 0); else dma_len = (src_nents + dst_nents + 2 + assoc_nents) * sizeof(struct talitos_ptr) + authsize; -- cgit v1.2.3 From 5fa7dadc898567ce14d6d6d427e7bd8ce6eb5d39 Mon Sep 17 00:00:00 2001 From: Horia Geant? Date: Mon, 11 May 2015 20:03:24 +0300 Subject: crypto: talitos - avoid memleak in talitos_alg_alloc() Cc: # 3.2+ Fixes: 1d11911a8c57 ("crypto: talitos - fix warning: 'alg' may be used uninitialized in this function") Signed-off-by: Horia Geanta Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index aaa7af856b39..73a400483696 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -2813,6 +2813,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, break; default: dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type); + kfree(t_alg); return ERR_PTR(-EINVAL); } -- cgit v1.2.3 From 69d9cd8c592f1abce820dbce7181bbbf6812cfbd Mon Sep 17 00:00:00 2001 From: Horia Geant? Date: Mon, 11 May 2015 20:04:49 +0300 Subject: Revert "crypto: talitos - convert to use be16_add_cpu()" This reverts commit 7291a932c6e27d9768e374e9d648086636daf61c. The conversion to be16_add_cpu() is incorrect in case cryptlen is negative due to premature (i.e. before addition / subtraction) implicit conversion of cryptlen (int -> u16) leading to sign loss. 
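A standalone illustration of the sign loss described above (not from the patch itself): a negative int passed where a u16 parameter is expected is converted to a large positive value before any arithmetic can happen, which is what happens to cryptlen at the be16_add_cpu() call boundary.

#include <stdio.h>
#include <stdint.h>

/* stands in for be16_add_cpu()'s 'u16 val' parameter */
static void takes_u16(uint16_t val)
{
	printf("callee sees %u\n", val);	/* -4 arrives as 65532 */
}

int main(void)
{
	int cryptlen = -4;		/* hypothetical negative adjustment */
	takes_u16(cryptlen);		/* sign is lost at the call boundary */
	return 0;
}

The open-coded form restored by the revert performs the addition on int values and only then narrows the result, matching the rationale above.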
Cc: # 3.10+ Cc: Wei Yongjun Signed-off-by: Horia Geanta Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 73a400483696..5f7c74d0afc3 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1083,7 +1083,8 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count, sg_count--; link_tbl_ptr--; } - be16_add_cpu(&link_tbl_ptr->len, cryptlen); + link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len) + + cryptlen); /* tag end of link table */ link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; -- cgit v1.2.3 From bde9079f3cd5e4275be2169cf7b0c74bfb464f78 Mon Sep 17 00:00:00 2001 From: Horia Geant? Date: Tue, 12 May 2015 11:28:05 +0300 Subject: crypto: talitos - avoid out of bound scatterlist iterator Check return value of scatterlist_sg_next(), i.e. don't rely solely on number of bytes to be processed or number of scatterlist entries. Signed-off-by: Horia Geanta Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 5f7c74d0afc3..ba5f68b987ab 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1065,7 +1065,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count, { int n_sg = sg_count; - while (n_sg--) { + while (sg && n_sg--) { to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg), 0); link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg)); link_tbl_ptr->j_extent = 0; @@ -1254,7 +1254,7 @@ static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained) int sg_nents = 0; *chained = false; - while (nbytes > 0) { + while (nbytes > 0 && sg) { sg_nents++; nbytes -= sg->length; if (!sg_is_last(sg) && (sg + 1)->length == 0) -- cgit v1.2.3 From 42e8b0d7fed0628e21ec93e8cd547aef616e68f7 Mon Sep 17 00:00:00 2001 From: Horia Geant? 
Date: Mon, 11 May 2015 20:04:56 +0300 Subject: crypto: talitos - static code checker fixes -change req_ctx->nbuf from u64 to unsigned int to silence checker warnings; this is safe since nbuf value is <= HASH_MAX_BLOCK_SIZE -remove unused value read from TALITOS_CCPSR; there is no requirement to read upper 32b before reading lower 32b of a 64b register; SEC RM mentions: "reads can always be done by byte, word, or dword" -remove unused return value of sg_to_link_tbl() -change "len" parameter of map_single_talitos_ptr() and to_talitos_ptr_len() to unsigned int; later, cpu_to_be16 will __force downcast the value to unsigned short without any checker warning Signed-off-by: Horia Geanta Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index ba5f68b987ab..83aca95a95bc 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -63,7 +63,7 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr, ptr->eptr = upper_32_bits(dma_addr); } -static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned short len, +static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len, bool is_sec1) { if (is_sec1) { @@ -94,7 +94,7 @@ static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1) */ static void map_single_talitos_ptr(struct device *dev, struct talitos_ptr *ptr, - unsigned short len, void *data, + unsigned int len, void *data, enum dma_data_direction dir) { dma_addr_t dma_addr = dma_map_single(dev, data, len, dir); @@ -543,7 +543,7 @@ static void talitos_error(struct device *dev, u32 isr, u32 isr_lo) struct talitos_private *priv = dev_get_drvdata(dev); unsigned int timeout = TALITOS_TIMEOUT; int ch, error, reset_dev = 0; - u32 v, v_lo; + u32 v_lo; bool is_sec1 = has_ftr_sec1(priv); int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */ @@ -560,7 +560,6 @@ static void talitos_error(struct device *dev, u32 isr, u32 isr_lo) error = -EINVAL; - v = in_be32(priv->chan[ch].reg + TALITOS_CCPSR); v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO); if (v_lo & TALITOS_CCPSR_LO_DOF) { @@ -815,7 +814,7 @@ struct talitos_ahash_req_ctx { unsigned int first; unsigned int last; unsigned int to_hash_later; - u64 nbuf; + unsigned int nbuf; struct scatterlist bufsl[2]; struct scatterlist *psrc; }; @@ -1639,8 +1638,7 @@ void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst, (edesc->src_nents + 1) * sizeof(struct talitos_ptr), 0); ptr->j_extent |= DESC_PTR_LNKTBL_JUMP; - sg_count = sg_to_link_tbl(dst, sg_count, len, - link_tbl_ptr); + sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr); dma_sync_single_for_device(dev, edesc->dma_link_tbl, edesc->dma_len, DMA_BIDIRECTIONAL); -- cgit v1.2.3 From 604c31039dae4653f33003d08c91ef58b70b5e63 Mon Sep 17 00:00:00 2001 From: Pali Rohár Date: Sun, 8 Mar 2015 11:01:01 +0100 Subject: crypto: omap-sham - Check for return value from pm_runtime_get_sync MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Function pm_runtime_get_sync could fail and we need to check return value to prevent kernel crash. 
Signed-off-by: Pali Rohár Acked-by: Pavel Machek Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 4d63e0d4da9a..a6703204fd86 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -362,7 +362,13 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req) static int omap_sham_hw_init(struct omap_sham_dev *dd) { - pm_runtime_get_sync(dd->dev); + int err; + + err = pm_runtime_get_sync(dd->dev); + if (err < 0) { + dev_err(dd->dev, "failed to get sync: %d\n", err); + return err; + } if (!test_bit(FLAGS_INIT, &dd->flags)) { set_bit(FLAGS_INIT, &dd->flags); @@ -1947,7 +1953,13 @@ static int omap_sham_probe(struct platform_device *pdev) pm_runtime_enable(dev); pm_runtime_irq_safe(dev); - pm_runtime_get_sync(dev); + + err = pm_runtime_get_sync(dev); + if (err < 0) { + dev_err(dev, "failed to get sync: %d\n", err); + goto err_pm; + } + rev = omap_sham_read(dd, SHA_REG_REV(dd)); pm_runtime_put_sync(&pdev->dev); @@ -1977,6 +1989,7 @@ err_algs: for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) crypto_unregister_ahash( &dd->pdata->algs_info[i].algs_list[j]); +err_pm: pm_runtime_disable(dev); if (dd->dma_lch) dma_release_channel(dd->dma_lch); @@ -2019,7 +2032,11 @@ static int omap_sham_suspend(struct device *dev) static int omap_sham_resume(struct device *dev) { - pm_runtime_get_sync(dev); + int err = pm_runtime_get_sync(dev); + if (err < 0) { + dev_err(dev, "failed to get sync: %d\n", err); + return err; + } return 0; } #endif -- cgit v1.2.3 From 733a82e24e7aa405b4698bf21e57ec7c77cab2cf Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Wed, 13 May 2015 14:45:30 -0700 Subject: crypto: qat - remove unused structure members Cleanup unused structure members. 
Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/adf_cfg_user.h | 12 ------------ 1 file changed, 12 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/qat/qat_common/adf_cfg_user.h b/drivers/crypto/qat/qat_common/adf_cfg_user.h index 0c38a155a865..ef5988afd4c6 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg_user.h +++ b/drivers/crypto/qat/qat_common/adf_cfg_user.h @@ -53,14 +53,6 @@ struct adf_user_cfg_key_val { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; - union { - char *user_val_ptr; - uint64_t padding1; - }; - union { - struct adf_user_cfg_key_val *prev; - uint64_t padding2; - }; union { struct adf_user_cfg_key_val *next; uint64_t padding3; @@ -74,10 +66,6 @@ struct adf_user_cfg_section { struct adf_user_cfg_key_val *params; uint64_t padding1; }; - union { - struct adf_user_cfg_section *prev; - uint64_t padding2; - }; union { struct adf_user_cfg_section *next; uint64_t padding3; -- cgit v1.2.3 From 3848d3478669f5d8b3d03dcf758960d2d1870f4a Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Wed, 13 May 2015 14:45:31 -0700 Subject: crypto: qat - rm unneeded header include Don't need proc_fs.h Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/adf_accel_devices.h | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h index f22ce7169fa5..5fe902967620 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h @@ -48,7 +48,6 @@ #define ADF_ACCEL_DEVICES_H_ #include #include -#include #include #include "adf_cfg_common.h" -- cgit v1.2.3 From 42cb0c7bdf9ddd33b33d195414f9171e7fa7472c Mon Sep 17 00:00:00 2001 From: Paulo Flabiano Smorigo Date: Thu, 14 May 2015 12:21:04 -0300 Subject: crypto: vmx - fix two mistyped texts One mistyped description and another mistyped target were corrected. Signed-off-by: Paulo Flabiano Smorigo Signed-off-by: Herbert Xu --- drivers/crypto/vmx/Makefile | 2 +- drivers/crypto/vmx/vmx.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile index c699c6e6c82e..d28ab96a2475 100644 --- a/drivers/crypto/vmx/Makefile +++ b/drivers/crypto/vmx/Makefile @@ -4,7 +4,7 @@ vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o gha ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) TARGET := linux-ppc64le else -TARGET := linux-pcc64 +TARGET := linux-ppc64 endif quiet_cmd_perl = PERL $@ diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c index 44d8d5cfe40d..4c398ddd8c10 100644 --- a/drivers/crypto/vmx/vmx.c +++ b/drivers/crypto/vmx/vmx.c @@ -82,7 +82,7 @@ module_init(p8_init); module_exit(p8_exit); MODULE_AUTHOR("Marcelo Cerri"); -MODULE_DESCRIPTION("IBM VMX cryptogaphic acceleration instructions support on Power 8"); +MODULE_DESCRIPTION("IBM VMX cryptographic acceleration instructions support on Power 8"); MODULE_LICENSE("GPL"); MODULE_VERSION("1.0.0"); -- cgit v1.2.3 From 9358eac06b8fcf9fcaf566ec825fc05e2b1acd10 Mon Sep 17 00:00:00 2001 From: Dan Streetman Date: Fri, 15 May 2015 11:07:33 -0400 Subject: crypto: nx - remove 842-nx null checks Remove the null checks for tfm, src, slen, dst, dlen; tfm will never be null and the other fields are always expected to be set correctly. 
Reported-by: Dan Carpenter Signed-off-by: Dan Streetman Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-842-crypto.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/nx/nx-842-crypto.c b/drivers/crypto/nx/nx-842-crypto.c index cb177c317179..2ffa1031c91f 100644 --- a/drivers/crypto/nx/nx-842-crypto.c +++ b/drivers/crypto/nx/nx-842-crypto.c @@ -298,9 +298,6 @@ static int nx842_crypto_compress(struct crypto_tfm *tfm, bool add_header; u16 ignore = 0; - if (!tfm || !src || !slen || !dst || !dlen) - return -EINVAL; - p.in = (u8 *)src; p.iremain = slen; p.out = dst; @@ -483,9 +480,6 @@ static int nx842_crypto_decompress(struct crypto_tfm *tfm, u16 ignore = 0; bool usehw = true; - if (!tfm || !src || !slen || !dst || !dlen) - return -EINVAL; - p.in = (u8 *)src; p.iremain = slen; p.out = dst; -- cgit v1.2.3 From d0bb9ee316b0c1a22fc3b9e6d1fef1e4521cbf5e Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Sun, 17 May 2015 12:54:16 +0200 Subject: crypto: n2 - use md5 IV MD5_HX instead of their raw value Since MD5 IV are now available in crypto/md5.h, use them. Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu --- drivers/crypto/n2_core.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index afd136b45f49..722392f76d95 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -1281,10 +1281,10 @@ static const char md5_zero[MD5_DIGEST_SIZE] = { 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e, }; static const u32 md5_init[MD5_HASH_WORDS] = { - cpu_to_le32(0x67452301), - cpu_to_le32(0xefcdab89), - cpu_to_le32(0x98badcfe), - cpu_to_le32(0x10325476), + cpu_to_le32(MD5_H0), + cpu_to_le32(MD5_H1), + cpu_to_le32(MD5_H2), + cpu_to_le32(MD5_H3), }; static const char sha1_zero[SHA1_DIGEST_SIZE] = { 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, -- cgit v1.2.3 From eddca85b1a9ab451772791d03af1f016e9c4ea0b Mon Sep 17 00:00:00 2001 From: Pali Rohár Date: Thu, 26 Feb 2015 14:49:53 +0100 Subject: crypto: omap-sham - Add support for omap3 devices MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit omap3 support is same as omap2, just with different IO address (specified in DT) Signed-off-by: Pali Rohár Acked-by: Pavel Machek Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index a6703204fd86..b2024c95a3cf 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -1798,6 +1798,10 @@ static const struct of_device_id omap_sham_of_match[] = { .compatible = "ti,omap2-sham", .data = &omap_sham_pdata_omap2, }, + { + .compatible = "ti,omap3-sham", + .data = &omap_sham_pdata_omap2, + }, { .compatible = "ti,omap4-sham", .data = &omap_sham_pdata_omap4, -- cgit v1.2.3 From ae13ed44382c8d67321bc789166be98fbb7061d9 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 21 May 2015 15:11:03 +0800 Subject: crypto: caam - Use old_aead_alg This patch replaces references to aead_alg with old_aead_alg. 
Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index a34fc95ab8dc..3d850ab47c28 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -3379,7 +3379,7 @@ struct caam_alg_template { u32 type; union { struct ablkcipher_alg ablkcipher; - struct aead_alg aead; + struct old_aead_alg aead; } template_u; u32 class1_alg_type; u32 class2_alg_type; -- cgit v1.2.3 From 6da9c2335e5f7124521a701e2a883b46e3d6a215 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 21 May 2015 15:11:06 +0800 Subject: crypto: ixp4xx - Use crypto_aead_maxauthsize This patch uses the helper crypto_aead_maxauthsize instead of directly dereferencing aead_alg. Signed-off-by: Herbert Xu --- drivers/crypto/ixp4xx_crypto.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 46cd6966dc45..7ba495f75370 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -1097,7 +1097,7 @@ static int aead_setup(struct crypto_aead *tfm, unsigned int authsize) { struct ixp_ctx *ctx = crypto_aead_ctx(tfm); u32 *flags = &tfm->base.crt_flags; - unsigned digest_len = crypto_aead_alg(tfm)->maxauthsize; + unsigned digest_len = crypto_aead_maxauthsize(tfm); int ret; if (!ctx->enckey_len && !ctx->authkey_len) @@ -1139,7 +1139,7 @@ out: static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { - int max = crypto_aead_alg(tfm)->maxauthsize >> 2; + int max = crypto_aead_maxauthsize(tfm) >> 2; if ((authsize>>2) < 1 || (authsize>>2) > max || (authsize & 3)) return -EINVAL; -- cgit v1.2.3 From 56fcf73a29007aa7bec2e3fc5da2962f3f72d610 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 21 May 2015 15:11:07 +0800 Subject: crypto: nx - Remove unnecessary maxauthsize check The crypto layer already checks maxauthsize when setauthsize is called. So there is no need to check it again within setauthsize. Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-aes-gcm.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c index 88c562434bc0..e4e64f688158 100644 --- a/drivers/crypto/nx/nx-aes-gcm.c +++ b/drivers/crypto/nx/nx-aes-gcm.c @@ -96,9 +96,6 @@ out: static int gcm_aes_nx_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { - if (authsize > crypto_aead_alg(tfm)->maxauthsize) - return -EINVAL; - crypto_aead_crt(tfm)->authsize = authsize; return 0; -- cgit v1.2.3 From 2ea376a3adf8ddf30c50c2709bbf3cdab9baa6a8 Mon Sep 17 00:00:00 2001 From: Boris BREZILLON Date: Fri, 22 May 2015 15:33:47 +0200 Subject: crypto: mv_cesa - request registers memory region The mv_cesa driver does not request the CESA registers memory region. Since we're about to add a new CESA driver, we need to make sure only one of these drivers probe the CESA device, and requesting the registers memory region is a good way to achieve that. 
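The exclusivity described above comes from devm_ioremap_resource() requesting the register region before mapping it, so a second driver that probes the same registers gets an error instead of silently sharing the mapping. A minimal, hypothetical probe sketch of that pattern (not taken from the patch):

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* requests the memory region and then ioremaps it; if another
	 * driver already owns the region this returns an ERR_PTR and
	 * the probe bails out */
	regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	return 0;
}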
Signed-off-by: Boris Brezillon Signed-off-by: Herbert Xu --- drivers/crypto/mv_cesa.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c index e63efbd840b5..eb645c2cf3eb 100644 --- a/drivers/crypto/mv_cesa.c +++ b/drivers/crypto/mv_cesa.c @@ -1041,23 +1041,23 @@ static int mv_probe(struct platform_device *pdev) spin_lock_init(&cp->lock); crypto_init_queue(&cp->queue, 50); - cp->reg = ioremap(res->start, resource_size(res)); - if (!cp->reg) { - ret = -ENOMEM; + cp->reg = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(cp->reg)) { + ret = PTR_ERR(cp->reg); goto err; } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram"); if (!res) { ret = -ENXIO; - goto err_unmap_reg; + goto err; } cp->sram_size = resource_size(res); cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE; cp->sram = ioremap(res->start, cp->sram_size); if (!cp->sram) { ret = -ENOMEM; - goto err_unmap_reg; + goto err; } if (pdev->dev.of_node) @@ -1136,8 +1136,6 @@ err_thread: kthread_stop(cp->queue_th); err_unmap_sram: iounmap(cp->sram); -err_unmap_reg: - iounmap(cp->reg); err: kfree(cp); cpg = NULL; @@ -1158,7 +1156,6 @@ static int mv_remove(struct platform_device *pdev) free_irq(cp->irq, cp); memset(cp->sram, 0, cp->sram_size); iounmap(cp->sram); - iounmap(cp->reg); if (!IS_ERR(cp->clk)) { clk_disable_unprepare(cp->clk); -- cgit v1.2.3 From 1d9de44e268d880cbe2d0bd3be1ef0661f93fd34 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 21 May 2015 16:19:54 +0800 Subject: random: Wake up all getrandom(2) callers when pool is ready If more than one application invokes getrandom(2) before the pool is ready, then all bar one will be stuck forever because we use wake_up_interruptible which wakes up a single task. This patch replaces it with wake_up_all. Signed-off-by: Herbert Xu --- drivers/char/random.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index 9cd6968e2f92..8b8c46b5fd5c 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -660,7 +660,7 @@ retry: r->entropy_total = 0; if (r == &nonblocking_pool) { prandom_reseed_late(); - wake_up_interruptible(&urandom_init_wait); + wake_up_all(&urandom_init_wait); pr_notice("random: %s pool is initialized\n", r->name); } } -- cgit v1.2.3 From 16b369a91d0dd80be214b7f7801fbc51875454cc Mon Sep 17 00:00:00 2001 From: Stephan Mueller Date: Mon, 25 May 2015 15:08:47 +0200 Subject: random: Blocking API for accessing nonblocking_pool The added API calls provide a synchronous function call get_blocking_random_bytes where the caller is blocked until the nonblocking_pool is initialized. CC: Andreas Steffen CC: Theodore Ts'o CC: Sandy Harris Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu --- drivers/char/random.c | 12 ++++++++++++ include/linux/random.h | 1 + 2 files changed, 13 insertions(+) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index 8b8c46b5fd5c..159d0700f7d8 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1244,6 +1244,18 @@ void get_random_bytes(void *buf, int nbytes) } EXPORT_SYMBOL(get_random_bytes); +/* + * Equivalent function to get_random_bytes with the difference that this + * function blocks the request until the nonblocking_pool is initialized. 
+ */ +void get_blocking_random_bytes(void *buf, int nbytes) +{ + if (unlikely(nonblocking_pool.initialized == 0)) + wait_event(urandom_init_wait, nonblocking_pool.initialized); + extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0); +} +EXPORT_SYMBOL(get_blocking_random_bytes); + /* * This function will use the architecture-specific hardware random * number generator if it is available. The arch-specific hw RNG will diff --git a/include/linux/random.h b/include/linux/random.h index b05856e16b75..796267d56901 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -14,6 +14,7 @@ extern void add_input_randomness(unsigned int type, unsigned int code, extern void add_interrupt_randomness(int irq, int irq_flags); extern void get_random_bytes(void *buf, int nbytes); +extern void get_blocking_random_bytes(void *buf, int nbytes); extern void get_random_bytes_arch(void *buf, int nbytes); void generate_random_uuid(unsigned char uuid_out[16]); extern int random_int_secret_init(void); -- cgit v1.2.3 From d921620e03c58bd83c4b345e36a104988d697f0d Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Tue, 26 May 2015 13:06:24 -0500 Subject: crypto: ccp - Remove manual check and set of dma_mask pointer The underlying device support will set the device dma_mask pointer if DMA is set up properly for the device. Remove the check for and assignment of dma_mask when it is null. Instead, just error out if the dma_set_mask_and_coherent function fails because dma_mask is null. Signed-off-by: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-platform.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c index b1c20b2b5647..c0aa5c5c5f9d 100644 --- a/drivers/crypto/ccp/ccp-platform.c +++ b/drivers/crypto/ccp/ccp-platform.c @@ -174,8 +174,6 @@ static int ccp_platform_probe(struct platform_device *pdev) } ccp->io_regs = ccp->io_map; - if (!dev->dma_mask) - dev->dma_mask = &dev->coherent_dma_mask; ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); if (ret) { dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret); -- cgit v1.2.3 From d725332208ef13241fc435eece790c9d0ea16a4e Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Tue, 26 May 2015 13:06:30 -0500 Subject: crypto: ccp - Remove unused structure field Remove the length field from the ccp_sg_workarea since it is unused. Signed-off-by: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-ops.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 71f2e3c89424..542453c9ba1c 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -53,7 +53,6 @@ struct ccp_dm_workarea { struct ccp_sg_workarea { struct scatterlist *sg; unsigned int nents; - unsigned int length; struct scatterlist *dma_sg; struct device *dma_dev; @@ -497,7 +496,6 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev, return 0; wa->nents = sg_nents(sg); - wa->length = sg->length; wa->bytes_left = len; wa->sg_used = 0; -- cgit v1.2.3 From bfa1ce5f38938cc9e6c7f2d1011f88eba2b9e2b2 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Thu, 28 May 2015 11:40:54 +0200 Subject: bus: mvebu-mbus: add mv_mbus_dram_info_nooverlap() This commit introduces a variant of the mv_mbus_dram_info() function called mv_mbus_dram_info_nooverlap(). 
Both functions are used by Marvell drivers supporting devices doing DMA, and provide them a description the DRAM ranges that they need to configure their DRAM windows. The ranges provided by the mv_mbus_dram_info() function may overlap with the I/O windows if there is a lot (>= 4 GB) of RAM installed. This is not a problem for most of the DMA masters, except for the upcoming new CESA crypto driver because it does DMA to the SRAM, which is mapped through an I/O window. For this unit, we need to have DRAM ranges that do not overlap with the I/O windows. A first implementation done in commit 1737cac69369 ("bus: mvebu-mbus: make sure SDRAM CS for DMA don't overlap the MBus bridge window"), changed the information returned by mv_mbus_dram_info() to match this requirement. However, it broke the requirement of the other DMA masters than the DRAM ranges should have power of two sizes. To solve this situation, this commit introduces a new mv_mbus_dram_info_nooverlap() function, which returns the same information as mv_mbus_dram_info(), but guaranteed to not overlap with the I/O windows. In the end, it gives us two variants of the mv_mbus_dram_info*() functions: - The normal one, mv_mbus_dram_info(), which has been around for many years. This function returns the raw DRAM ranges, which are guaranteed to use power of two sizes, but will overlap with I/O windows. This function will therefore be used by all DMA masters (SATA, XOR, Ethernet, etc.) except the CESA crypto driver. - The new 'nooverlap' variant, mv_mbus_dram_info_nooverlap(). This function returns DRAM ranges after they have been "tweaked" to make sure they don't overlap with I/O windows. By doing this tweaking, we remove the power of two size guarantee. This variant will be used by the new CESA crypto driver. Signed-off-by: Thomas Petazzoni Signed-off-by: Gregory CLEMENT --- drivers/bus/mvebu-mbus.c | 117 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/mbus.h | 5 ++ 2 files changed, 122 insertions(+) (limited to 'drivers') diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c index 6f047dcb94c2..c43c3d2baf73 100644 --- a/drivers/bus/mvebu-mbus.c +++ b/drivers/bus/mvebu-mbus.c @@ -57,6 +57,7 @@ #include #include #include +#include #include /* @@ -152,13 +153,39 @@ struct mvebu_mbus_state { static struct mvebu_mbus_state mbus_state; +/* + * We provide two variants of the mv_mbus_dram_info() function: + * + * - The normal one, where the described DRAM ranges may overlap with + * the I/O windows, but for which the DRAM ranges are guaranteed to + * have a power of two size. Such ranges are suitable for the DMA + * masters that only DMA between the RAM and the device, which is + * actually all devices except the crypto engines. + * + * - The 'nooverlap' one, where the described DRAM ranges are + * guaranteed to not overlap with the I/O windows, but for which the + * DRAM ranges will not have power of two sizes. They will only be + * aligned on a 64 KB boundary, and have a size multiple of 64 + * KB. Such ranges are suitable for the DMA masters that DMA between + * the crypto SRAM (which is mapped through an I/O window) and a + * device. This is the case for the crypto engines. 
+ */ + static struct mbus_dram_target_info mvebu_mbus_dram_info; +static struct mbus_dram_target_info mvebu_mbus_dram_info_nooverlap; + const struct mbus_dram_target_info *mv_mbus_dram_info(void) { return &mvebu_mbus_dram_info; } EXPORT_SYMBOL_GPL(mv_mbus_dram_info); +const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void) +{ + return &mvebu_mbus_dram_info_nooverlap; +} +EXPORT_SYMBOL_GPL(mv_mbus_dram_info_nooverlap); + /* Checks whether the given window has remap capability */ static bool mvebu_mbus_window_is_remappable(struct mvebu_mbus_state *mbus, const int win) @@ -576,6 +603,95 @@ static unsigned int armada_xp_mbus_win_remap_offset(int win) return MVEBU_MBUS_NO_REMAP; } +/* + * Use the memblock information to find the MBus bridge hole in the + * physical address space. + */ +static void __init +mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end) +{ + struct memblock_region *r; + uint64_t s = 0; + + for_each_memblock(memory, r) { + /* + * This part of the memory is above 4 GB, so we don't + * care for the MBus bridge hole. + */ + if (r->base >= 0x100000000ULL) + continue; + + /* + * The MBus bridge hole is at the end of the RAM under + * the 4 GB limit. + */ + if (r->base + r->size > s) + s = r->base + r->size; + } + + *start = s; + *end = 0x100000000ULL; +} + +/* + * This function fills in the mvebu_mbus_dram_info_nooverlap data + * structure, by looking at the mvebu_mbus_dram_info data, and + * removing the parts of it that overlap with I/O windows. + */ +static void __init +mvebu_mbus_setup_cpu_target_nooverlap(struct mvebu_mbus_state *mbus) +{ + uint64_t mbus_bridge_base, mbus_bridge_end; + int cs_nooverlap = 0; + int i; + + mvebu_mbus_find_bridge_hole(&mbus_bridge_base, &mbus_bridge_end); + + for (i = 0; i < mvebu_mbus_dram_info.num_cs; i++) { + struct mbus_dram_window *w; + u64 base, size, end; + + w = &mvebu_mbus_dram_info.cs[i]; + base = w->base; + size = w->size; + end = base + size; + + /* + * The CS is fully enclosed inside the MBus bridge + * area, so ignore it. + */ + if (base >= mbus_bridge_base && end <= mbus_bridge_end) + continue; + + /* + * Beginning of CS overlaps with end of MBus, raise CS + * base address, and shrink its size. + */ + if (base >= mbus_bridge_base && end > mbus_bridge_end) { + size -= mbus_bridge_end - base; + base = mbus_bridge_end; + } + + /* + * End of CS overlaps with beginning of MBus, shrink + * CS size. 
+ */ + if (base < mbus_bridge_base && end > mbus_bridge_base) + size -= end - mbus_bridge_base; + + w = &mvebu_mbus_dram_info_nooverlap.cs[cs_nooverlap++]; + w->cs_index = i; + w->mbus_attr = 0xf & ~(1 << i); + if (mbus->hw_io_coherency) + w->mbus_attr |= ATTR_HW_COHERENCY; + w->base = base; + w->size = size; + } + + mvebu_mbus_dram_info_nooverlap.mbus_dram_target_id = TARGET_DDR; + mvebu_mbus_dram_info_nooverlap.num_cs = cs_nooverlap; +} + static void __init mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus) { @@ -964,6 +1080,7 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus, mvebu_mbus_disable_window(mbus, win); mbus->soc->setup_cpu_target(mbus); + mvebu_mbus_setup_cpu_target_nooverlap(mbus); if (is_coherent) writel(UNIT_SYNC_BARRIER_ALL, diff --git a/include/linux/mbus.h b/include/linux/mbus.h index 611b69fa8594..1f7bc630d225 100644 --- a/include/linux/mbus.h +++ b/include/linux/mbus.h @@ -54,11 +54,16 @@ struct mbus_dram_target_info */ #ifdef CONFIG_PLAT_ORION extern const struct mbus_dram_target_info *mv_mbus_dram_info(void); +extern const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void); #else static inline const struct mbus_dram_target_info *mv_mbus_dram_info(void) { return NULL; } +static inline const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void) +{ + return NULL; +} #endif int mvebu_mbus_save_cpu_target(u32 *store_addr); -- cgit v1.2.3 From 3e648cbeb31be5cb84b9ec19822e2b85417f07c4 Mon Sep 17 00:00:00 2001 From: Dan Streetman Date: Thu, 28 May 2015 16:21:31 -0400 Subject: crypto: nx - prevent nx 842 load if no hw driver Change the nx-842 common driver to wait for loading of both platform drivers, and fail loading if the platform driver pointer is not set. Add an independent platform driver pointer, that the platform drivers set if they find they are able to load (i.e. if they find their platform devicetree node(s)). The problem is currently, the main nx-842 driver will stay loaded even if there is no platform driver and thus no possible way it can do any compression or decompression. This allows the crypto 842-nx driver to load even if it won't actually work. For crypto compression users (e.g. zswap) that expect an available crypto compression driver to actually work, this is bad. This patch fixes that, so the 842-nx crypto compression driver won't load if it doesn't have the driver and hardware available to perform the compression. 
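Condensed, the load-time gate this message describes looks roughly like the sketch below (the complete version appears in the diff that follows): the generic module asks both platform modules to load, then refuses to initialise unless one of them has claimed the shared driver pointer.

static int __init nx842_init_sketch(void)
{
	request_module("nx-compress-powernv");
	request_module("nx-compress-pseries");

	/*
	 * nx842_platform_driver_get() pins whichever platform driver
	 * registered itself (via try_module_get), so once this check
	 * passes the hardware backend cannot unload underneath the
	 * 842-nx crypto users.
	 */
	if (!nx842_platform_driver_get())
		return -ENODEV;

	return 0;
}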
Signed-off-by: Dan Streetman Signed-off-by: Herbert Xu --- drivers/crypto/nx/Makefile | 3 +- drivers/crypto/nx/nx-842-platform.c | 84 +++++++++++++++++++++++ drivers/crypto/nx/nx-842-powernv.c | 17 +++-- drivers/crypto/nx/nx-842-pseries.c | 33 ++++++--- drivers/crypto/nx/nx-842.c | 130 ++++++------------------------------ drivers/crypto/nx/nx-842.h | 16 ++--- 6 files changed, 148 insertions(+), 135 deletions(-) create mode 100644 drivers/crypto/nx/nx-842-platform.c (limited to 'drivers') diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile index 868b5e630794..e1684f5adb11 100644 --- a/drivers/crypto/nx/Makefile +++ b/drivers/crypto/nx/Makefile @@ -10,11 +10,12 @@ nx-crypto-objs := nx.o \ nx-sha256.o \ nx-sha512.o -obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS) += nx-compress.o +obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS) += nx-compress.o nx-compress-platform.o obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_CRYPTO) += nx-compress-crypto.o nx-compress-objs := nx-842.o +nx-compress-platform-objs := nx-842-platform.o nx-compress-pseries-objs := nx-842-pseries.o nx-compress-powernv-objs := nx-842-powernv.o nx-compress-crypto-objs := nx-842-crypto.o diff --git a/drivers/crypto/nx/nx-842-platform.c b/drivers/crypto/nx/nx-842-platform.c new file mode 100644 index 000000000000..664f13dd06ed --- /dev/null +++ b/drivers/crypto/nx/nx-842-platform.c @@ -0,0 +1,84 @@ + +#include "nx-842.h" + +/* this is needed, separate from the main nx-842.c driver, because that main + * driver loads the platform drivers during its init(), and it expects one + * (or none) of the platform drivers to set this pointer to its driver. + * That means this pointer can't be in the main nx-842 driver, because it + * wouldn't be accessible until after the main driver loaded, which wouldn't + * be possible as it's waiting for the platform driver to load. So place it + * here. 
+ */ +static struct nx842_driver *driver; +static DEFINE_SPINLOCK(driver_lock); + +struct nx842_driver *nx842_platform_driver(void) +{ + return driver; +} +EXPORT_SYMBOL_GPL(nx842_platform_driver); + +bool nx842_platform_driver_set(struct nx842_driver *_driver) +{ + bool ret = false; + + spin_lock(&driver_lock); + + if (!driver) { + driver = _driver; + ret = true; + } else + WARN(1, "can't set platform driver, already set to %s\n", + driver->name); + + spin_unlock(&driver_lock); + return ret; +} +EXPORT_SYMBOL_GPL(nx842_platform_driver_set); + +/* only call this from the platform driver exit function */ +void nx842_platform_driver_unset(struct nx842_driver *_driver) +{ + spin_lock(&driver_lock); + + if (driver == _driver) + driver = NULL; + else if (driver) + WARN(1, "can't unset platform driver %s, currently set to %s\n", + _driver->name, driver->name); + else + WARN(1, "can't unset platform driver, already unset\n"); + + spin_unlock(&driver_lock); +} +EXPORT_SYMBOL_GPL(nx842_platform_driver_unset); + +bool nx842_platform_driver_get(void) +{ + bool ret = false; + + spin_lock(&driver_lock); + + if (driver) + ret = try_module_get(driver->owner); + + spin_unlock(&driver_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(nx842_platform_driver_get); + +void nx842_platform_driver_put(void) +{ + spin_lock(&driver_lock); + + if (driver) + module_put(driver->owner); + + spin_unlock(&driver_lock); +} +EXPORT_SYMBOL_GPL(nx842_platform_driver_put); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Dan Streetman "); +MODULE_DESCRIPTION("842 H/W Compression platform driver"); diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c index 6a9fb8b2d05b..c388dba7da64 100644 --- a/drivers/crypto/nx/nx-842-powernv.c +++ b/drivers/crypto/nx/nx-842-powernv.c @@ -23,7 +23,6 @@ #include #include -#define MODULE_NAME NX842_POWERNV_MODULE_NAME MODULE_LICENSE("GPL"); MODULE_AUTHOR("Dan Streetman "); MODULE_DESCRIPTION("842 H/W Compression driver for IBM PowerNV processors"); @@ -571,6 +570,7 @@ static struct nx842_constraints nx842_powernv_constraints = { }; static struct nx842_driver nx842_powernv_driver = { + .name = KBUILD_MODNAME, .owner = THIS_MODULE, .constraints = &nx842_powernv_constraints, .compress = nx842_powernv_compress, @@ -593,7 +593,7 @@ static __init int nx842_powernv_init(void) pr_info("loading\n"); - for_each_compatible_node(dn, NULL, NX842_POWERNV_COMPAT_NAME) + for_each_compatible_node(dn, NULL, "ibm,power-nx") nx842_powernv_probe(dn); if (!nx842_ct) { @@ -601,7 +601,16 @@ static __init int nx842_powernv_init(void) return -ENODEV; } - nx842_register_driver(&nx842_powernv_driver); + if (!nx842_platform_driver_set(&nx842_powernv_driver)) { + struct nx842_coproc *coproc, *n; + + list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) { + list_del(&coproc->list); + kfree(coproc); + } + + return -EEXIST; + } pr_info("loaded\n"); @@ -613,7 +622,7 @@ static void __exit nx842_powernv_exit(void) { struct nx842_coproc *coproc, *n; - nx842_unregister_driver(&nx842_powernv_driver); + nx842_platform_driver_unset(&nx842_powernv_driver); list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) { list_del(&coproc->list); diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c index 85837e96e9a3..17f191777139 100644 --- a/drivers/crypto/nx/nx-842-pseries.c +++ b/drivers/crypto/nx/nx-842-pseries.c @@ -26,7 +26,6 @@ #include "nx-842.h" #include "nx_csbcpb.h" /* struct nx_csbcpb */ -#define MODULE_NAME NX842_PSERIES_MODULE_NAME MODULE_LICENSE("GPL"); 
MODULE_AUTHOR("Robert Jennings "); MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors"); @@ -965,6 +964,7 @@ static struct attribute_group nx842_attribute_group = { }; static struct nx842_driver nx842_pseries_driver = { + .name = KBUILD_MODNAME, .owner = THIS_MODULE, .constraints = &nx842_pseries_constraints, .compress = nx842_pseries_compress, @@ -1033,8 +1033,6 @@ static int __init nx842_probe(struct vio_dev *viodev, goto error; } - nx842_register_driver(&nx842_pseries_driver); - return 0; error_unlock: @@ -1066,18 +1064,16 @@ static int __exit nx842_remove(struct vio_dev *viodev) kfree(old_devdata->counters); kfree(old_devdata); - nx842_unregister_driver(&nx842_pseries_driver); - return 0; } static struct vio_device_id nx842_vio_driver_ids[] = { - {NX842_PSERIES_COMPAT_NAME "-v1", NX842_PSERIES_COMPAT_NAME}, + {"ibm,compression-v1", "ibm,compression"}, {"", ""}, }; static struct vio_driver nx842_vio_driver = { - .name = MODULE_NAME, + .name = KBUILD_MODNAME, .probe = nx842_probe, .remove = __exit_p(nx842_remove), .get_desired_dma = nx842_get_desired_dma, @@ -1087,10 +1083,15 @@ static struct vio_driver nx842_vio_driver = { static int __init nx842_init(void) { struct nx842_devdata *new_devdata; + int ret; + pr_info("Registering IBM Power 842 compression driver\n"); BUILD_BUG_ON(sizeof(struct nx842_workmem) > NX842_MEM_COMPRESS); + if (!of_find_compatible_node(NULL, NULL, "ibm,compression")) + return -ENODEV; + RCU_INIT_POINTER(devdata, NULL); new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL); if (!new_devdata) { @@ -1100,7 +1101,21 @@ static int __init nx842_init(void) new_devdata->status = UNAVAILABLE; RCU_INIT_POINTER(devdata, new_devdata); - return vio_register_driver(&nx842_vio_driver); + ret = vio_register_driver(&nx842_vio_driver); + if (ret) { + pr_err("Could not register VIO driver %d\n", ret); + + kfree(new_devdata); + return ret; + } + + if (!nx842_platform_driver_set(&nx842_pseries_driver)) { + vio_unregister_driver(&nx842_vio_driver); + kfree(new_devdata); + return -EEXIST; + } + + return 0; } module_init(nx842_init); @@ -1111,6 +1126,7 @@ static void __exit nx842_exit(void) unsigned long flags; pr_info("Exiting IBM Power 842 compression driver\n"); + nx842_platform_driver_unset(&nx842_pseries_driver); spin_lock_irqsave(&devdata_mutex, flags); old_devdata = rcu_dereference_check(devdata, lockdep_is_held(&devdata_mutex)); @@ -1120,7 +1136,6 @@ static void __exit nx842_exit(void) if (old_devdata && old_devdata->dev) dev_set_drvdata(old_devdata->dev, NULL); kfree(old_devdata); - nx842_unregister_driver(&nx842_pseries_driver); vio_unregister_driver(&nx842_vio_driver); } diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c index bf2823ceaf4e..9f391d64c722 100644 --- a/drivers/crypto/nx/nx-842.c +++ b/drivers/crypto/nx/nx-842.c @@ -21,71 +21,10 @@ #include "nx-842.h" -#define MODULE_NAME "nx-compress" MODULE_LICENSE("GPL"); MODULE_AUTHOR("Dan Streetman "); MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors"); -/* Only one driver is expected, based on the HW platform */ -static struct nx842_driver *nx842_driver; -static DEFINE_SPINLOCK(nx842_driver_lock); /* protects driver pointers */ - -void nx842_register_driver(struct nx842_driver *driver) -{ - spin_lock(&nx842_driver_lock); - - if (nx842_driver) { - pr_err("can't register driver %s, already using driver %s\n", - driver->owner->name, nx842_driver->owner->name); - } else { - pr_info("registering driver %s\n", driver->owner->name); - nx842_driver = driver; - } - - 
spin_unlock(&nx842_driver_lock); -} -EXPORT_SYMBOL_GPL(nx842_register_driver); - -void nx842_unregister_driver(struct nx842_driver *driver) -{ - spin_lock(&nx842_driver_lock); - - if (nx842_driver == driver) { - pr_info("unregistering driver %s\n", driver->owner->name); - nx842_driver = NULL; - } else if (nx842_driver) { - pr_err("can't unregister driver %s, using driver %s\n", - driver->owner->name, nx842_driver->owner->name); - } else { - pr_err("can't unregister driver %s, no driver in use\n", - driver->owner->name); - } - - spin_unlock(&nx842_driver_lock); -} -EXPORT_SYMBOL_GPL(nx842_unregister_driver); - -static struct nx842_driver *get_driver(void) -{ - struct nx842_driver *driver = NULL; - - spin_lock(&nx842_driver_lock); - - driver = nx842_driver; - - if (driver && !try_module_get(driver->owner)) - driver = NULL; - - spin_unlock(&nx842_driver_lock); - - return driver; -} - -static void put_driver(struct nx842_driver *driver) -{ - module_put(driver->owner); -} - /** * nx842_constraints * @@ -109,69 +48,38 @@ static void put_driver(struct nx842_driver *driver) */ int nx842_constraints(struct nx842_constraints *c) { - struct nx842_driver *driver = get_driver(); - int ret = 0; - - if (!driver) - return -ENODEV; - - BUG_ON(!c); - memcpy(c, driver->constraints, sizeof(*c)); - - put_driver(driver); - - return ret; + memcpy(c, nx842_platform_driver()->constraints, sizeof(*c)); + return 0; } EXPORT_SYMBOL_GPL(nx842_constraints); -int nx842_compress(const unsigned char *in, unsigned int in_len, - unsigned char *out, unsigned int *out_len, - void *wrkmem) +int nx842_compress(const unsigned char *in, unsigned int ilen, + unsigned char *out, unsigned int *olen, void *wmem) { - struct nx842_driver *driver = get_driver(); - int ret; - - if (!driver) - return -ENODEV; - - ret = driver->compress(in, in_len, out, out_len, wrkmem); - - put_driver(driver); - - return ret; + return nx842_platform_driver()->compress(in, ilen, out, olen, wmem); } EXPORT_SYMBOL_GPL(nx842_compress); -int nx842_decompress(const unsigned char *in, unsigned int in_len, - unsigned char *out, unsigned int *out_len, - void *wrkmem) +int nx842_decompress(const unsigned char *in, unsigned int ilen, + unsigned char *out, unsigned int *olen, void *wmem) { - struct nx842_driver *driver = get_driver(); - int ret; - - if (!driver) - return -ENODEV; - - ret = driver->decompress(in, in_len, out, out_len, wrkmem); - - put_driver(driver); - - return ret; + return nx842_platform_driver()->decompress(in, ilen, out, olen, wmem); } EXPORT_SYMBOL_GPL(nx842_decompress); static __init int nx842_init(void) { - pr_info("loading\n"); - - if (of_find_compatible_node(NULL, NULL, NX842_POWERNV_COMPAT_NAME)) - request_module_nowait(NX842_POWERNV_MODULE_NAME); - else if (of_find_compatible_node(NULL, NULL, NX842_PSERIES_COMPAT_NAME)) - request_module_nowait(NX842_PSERIES_MODULE_NAME); - else + request_module("nx-compress-powernv"); + request_module("nx-compress-pseries"); + + /* we prevent loading if there's no platform driver, and we get the + * module that set it so it won't unload, so we don't need to check + * if it's set in any of the above functions + */ + if (!nx842_platform_driver_get()) { pr_err("no nx842 driver found.\n"); - - pr_info("loaded\n"); + return -ENODEV; + } return 0; } @@ -179,6 +87,6 @@ module_init(nx842_init); static void __exit nx842_exit(void) { - pr_info("NX842 unloaded\n"); + nx842_platform_driver_put(); } module_exit(nx842_exit); diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h index 
84b15b7448bb..1730f4da1cf6 100644 --- a/drivers/crypto/nx/nx-842.h +++ b/drivers/crypto/nx/nx-842.h @@ -105,6 +105,7 @@ static inline unsigned long nx842_get_pa(void *addr) #define SET_FIELD(v, m, val) (((v) & ~(m)) | (((val) << MASK_LSH(m)) & (m))) struct nx842_driver { + char *name; struct module *owner; struct nx842_constraints *constraints; @@ -117,15 +118,10 @@ struct nx842_driver { void *wrkmem); }; -void nx842_register_driver(struct nx842_driver *driver); -void nx842_unregister_driver(struct nx842_driver *driver); - - -/* To allow the main nx-compress module to load platform module */ -#define NX842_POWERNV_MODULE_NAME "nx-compress-powernv" -#define NX842_POWERNV_COMPAT_NAME "ibm,power-nx" -#define NX842_PSERIES_MODULE_NAME "nx-compress-pseries" -#define NX842_PSERIES_COMPAT_NAME "ibm,compression" - +struct nx842_driver *nx842_platform_driver(void); +bool nx842_platform_driver_set(struct nx842_driver *driver); +void nx842_platform_driver_unset(struct nx842_driver *driver); +bool nx842_platform_driver_get(void); +void nx842_platform_driver_put(void); #endif /* __NX_842_H__ */ -- cgit v1.2.3 From fb43f69401fef8ed2f72d7ea4a25910a0f2138bc Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Mon, 1 Jun 2015 11:15:53 -0500 Subject: crypto: ccp - Protect against poorly marked end of sg list Scatter gather lists can be created with more available entries than are actually used (e.g. using sg_init_table() to reserve a specific number of sg entries, but in actuality using something less than that based on the data length). The caller sometimes fails to mark the last entry with sg_mark_end(). In these cases, sg_nents() will return the original size of the sg list as opposed to the actual number of sg entries that contain valid data. On arm64, if the sg_nents() value is used in a call to dma_map_sg() in this situation, then it causes a BUG_ON in lib/swiotlb.c because an "empty" sg list entry results in dma_capable() returning false and swiotlb trying to create a bounce buffer of size 0. This occurred in the userspace crypto interface before being fixed by 0f477b655a52 ("crypto: algif - Mark sgl end at the end of data") Protect against this by using the new sg_nents_for_len() function which returns only the number of sg entries required to meet the desired length and supplying that value to dma_map_sg(). Signed-off-by: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-ops.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 542453c9ba1c..d09c6c4af4aa 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -52,7 +52,7 @@ struct ccp_dm_workarea { struct ccp_sg_workarea { struct scatterlist *sg; - unsigned int nents; + int nents; struct scatterlist *dma_sg; struct device *dma_dev; @@ -495,7 +495,10 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev, if (!sg) return 0; - wa->nents = sg_nents(sg); + wa->nents = sg_nents_for_len(sg, len); + if (wa->nents < 0) + return wa->nents; + wa->bytes_left = len; wa->sg_used = 0; -- cgit v1.2.3 From 3154de71258a32040214fda174e67b975b0810ef Mon Sep 17 00:00:00 2001 From: Dan Streetman Date: Tue, 2 Jun 2015 15:22:10 -0400 Subject: crypto: nx - fix nx-842 pSeries driver minimum buffer size Reduce the nx-842 pSeries driver minimum buffer size from 128 to 8. Also replace the single use of IO_BUFFER_ALIGN macro with the standard and correct DDE_BUFFER_ALIGN. 
The hw sometimes rejects buffers that contain padding past the end of the 8-byte aligned section where it sees the "end" marker. With the minimum buffer size set too high, some highly compressed buffers were being padded and the hw was incorrectly rejecting them; this sets the minimum correctly so there will be no incorrect padding. Signed-off-by: Dan Streetman Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-842-pseries.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c index 17f191777139..41bc551ccaf3 100644 --- a/drivers/crypto/nx/nx-842-pseries.c +++ b/drivers/crypto/nx/nx-842-pseries.c @@ -30,13 +30,10 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Robert Jennings "); MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors"); -/* IO buffer must be 128 byte aligned */ -#define IO_BUFFER_ALIGN 128 - static struct nx842_constraints nx842_pseries_constraints = { - .alignment = IO_BUFFER_ALIGN, + .alignment = DDE_BUFFER_ALIGN, .multiple = DDE_BUFFER_LAST_MULT, - .minimum = IO_BUFFER_ALIGN, + .minimum = DDE_BUFFER_LAST_MULT, .maximum = PAGE_SIZE, /* dynamic, max_sync_size */ }; -- cgit v1.2.3 From e4fa1460b35e18a0eae26223ca91047d92992f3e Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Fri, 5 Jun 2015 15:51:18 -0700 Subject: crypto: qat - Set max request size The device doesn't support the default value and will change it to 256, which will cause performance degradation for bigger packets. Add an explicit write to set it to 1024. Reported-by: Tianliang Wang Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_dh895xcc/adf_drv.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c index ecf0ef1fcda8..1bde45b7a3c5 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c @@ -300,6 +300,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_err; + pcie_set_readrq(pdev, 1024); + /* enable PCI device */ if (pci_enable_device(pdev)) { ret = -EFAULT; -- cgit v1.2.3 From ecb479d07a4e2db980965193511dbf2563d50db5 Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Fri, 5 Jun 2015 15:52:13 -0700 Subject: crypto: qat: fix issue when mapping assoc to internal AD struct This patch fixes an issue when building an internal AD representation. We need to check assoclen and not only blindly loop over assoc sgl.
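In pattern form, the fix bounds the walk over the assoc scatterlist by assoclen, mapping at most the remaining AD bytes from each entry and stopping once they are consumed. A condensed sketch of the hunk below (names follow the patch; DMA-mapping error handling and the surrounding buffer-list setup are omitted):

        struct scatterlist *sg;
        int i, bufs = 0;

        for_each_sg(assoc, sg, assoc_n, i) {
                if (!sg->length)
                        continue;
                if (assoclen <= 0)
                        break;          /* all AD bytes already covered */

                bufl->bufers[bufs].addr =
                        dma_map_single(dev, sg_virt(sg),
                                       min_t(int, assoclen, sg->length),
                                       DMA_BIDIRECTIONAL);
                bufl->bufers[bufs].len = min_t(int, assoclen, sg->length);
                bufs++;
                assoclen -= sg->length; /* may go negative on the last entry */
        }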
Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/qat_algs.c | 32 ++++++++++++++++++++------------ 1 file changed, 20 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index dc231b860a1e..067402c7c2a9 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -653,7 +653,7 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst, } static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, - struct scatterlist *assoc, + struct scatterlist *assoc, int assoclen, struct scatterlist *sgl, struct scatterlist *sglout, uint8_t *iv, uint8_t ivlen, @@ -685,15 +685,21 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, for_each_sg(assoc, sg, assoc_n, i) { if (!sg->length) continue; - bufl->bufers[bufs].addr = dma_map_single(dev, - sg_virt(sg), - sg->length, - DMA_BIDIRECTIONAL); - bufl->bufers[bufs].len = sg->length; + + if (!(assoclen > 0)) + break; + + bufl->bufers[bufs].addr = + dma_map_single(dev, sg_virt(sg), + min_t(int, assoclen, sg->length), + DMA_BIDIRECTIONAL); + bufl->bufers[bufs].len = min_t(int, assoclen, sg->length); if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr))) goto err; bufs++; + assoclen -= sg->length; } + if (ivlen) { bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen, DMA_BIDIRECTIONAL); @@ -845,8 +851,9 @@ static int qat_alg_aead_dec(struct aead_request *areq) int digst_size = crypto_aead_crt(aead_tfm)->authsize; int ret, ctr = 0; - ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst, - areq->iv, AES_BLOCK_SIZE, qat_req); + ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->assoclen, + areq->src, areq->dst, areq->iv, + AES_BLOCK_SIZE, qat_req); if (unlikely(ret)) return ret; @@ -889,8 +896,9 @@ static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv, struct icp_qat_fw_la_bulk_req *msg; int ret, ctr = 0; - ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst, - iv, AES_BLOCK_SIZE, qat_req); + ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->assoclen, + areq->src, areq->dst, iv, AES_BLOCK_SIZE, + qat_req); if (unlikely(ret)) return ret; @@ -1017,7 +1025,7 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req) struct icp_qat_fw_la_bulk_req *msg; int ret, ctr = 0; - ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst, + ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, 0, req->src, req->dst, NULL, 0, qat_req); if (unlikely(ret)) return ret; @@ -1055,7 +1063,7 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req) struct icp_qat_fw_la_bulk_req *msg; int ret, ctr = 0; - ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst, + ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, 0, req->src, req->dst, NULL, 0, qat_req); if (unlikely(ret)) return ret; -- cgit v1.2.3 From 70c3c8a96a85d333b3ff1f24df84c0e179261a8a Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 8 Jun 2015 16:38:24 +0800 Subject: crypto: caam - Clamp AEAD SG list by input length Currently caam assumes that the SG list contains exactly the number of bytes required. This assumption is incorrect. Up until now this has been harmless. However with the new AEAD interface this now breaks as the AD SG list contains more bytes than just the AD. This patch fixes this by always clamping the AD SG list by the specified AD length. 
Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 20 ++++++++------------ drivers/crypto/caam/sg_sw_sec4.h | 15 +++++++++++++++ 2 files changed, 23 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 3d850ab47c28..3c37fe63e598 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -2713,10 +2713,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, sec4_sg_index = 0; if (!all_contig) { if (!is_gcm) { - sg_to_sec4_sg(req->assoc, - assoc_nents, - edesc->sec4_sg + - sec4_sg_index, 0); + sg_to_sec4_sg_len(req->assoc, req->assoclen, + edesc->sec4_sg + sec4_sg_index); sec4_sg_index += assoc_nents; } @@ -2725,10 +2723,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, sec4_sg_index += 1; if (is_gcm) { - sg_to_sec4_sg(req->assoc, - assoc_nents, - edesc->sec4_sg + - sec4_sg_index, 0); + sg_to_sec4_sg_len(req->assoc, req->assoclen, + edesc->sec4_sg + sec4_sg_index); sec4_sg_index += assoc_nents; } @@ -2953,8 +2949,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request sec4_sg_index = 0; if (!(contig & GIV_SRC_CONTIG)) { if (!is_gcm) { - sg_to_sec4_sg(req->assoc, assoc_nents, - edesc->sec4_sg + sec4_sg_index, 0); + sg_to_sec4_sg_len(req->assoc, req->assoclen, + edesc->sec4_sg + sec4_sg_index); sec4_sg_index += assoc_nents; } @@ -2963,8 +2959,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request sec4_sg_index += 1; if (is_gcm) { - sg_to_sec4_sg(req->assoc, assoc_nents, - edesc->sec4_sg + sec4_sg_index, 0); + sg_to_sec4_sg_len(req->assoc, req->assoclen, + edesc->sec4_sg + sec4_sg_index); sec4_sg_index += assoc_nents; } diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h index 3b918218aa4c..efbc1db8da53 100644 --- a/drivers/crypto/caam/sg_sw_sec4.h +++ b/drivers/crypto/caam/sg_sw_sec4.h @@ -55,6 +55,21 @@ static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count, sec4_sg_ptr->len |= SEC4_SG_LEN_FIN; } +static inline struct sec4_sg_entry *sg_to_sec4_sg_len( + struct scatterlist *sg, unsigned int total, + struct sec4_sg_entry *sec4_sg_ptr) +{ + do { + unsigned int len = min(sg_dma_len(sg), total); + + dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), len, 0); + sec4_sg_ptr++; + sg = sg_next(sg); + total -= len; + } while (total); + return sec4_sg_ptr - 1; +} + /* count number of elements in scatterlist */ static inline int __sg_count(struct scatterlist *sg_list, int nbytes, bool *chained) -- cgit v1.2.3 From 205a525c334295e3cd4cc7755fd2c0398e3a787f Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 9 Jun 2015 18:19:39 +0800 Subject: random: Add callback API for random pool readiness The get_blocking_random_bytes API is broken because the wait can be arbitrarily long (potentially forever) so there is no safe way of calling it from within the kernel. This patch replaces it with a callback API instead. The callback is invoked potentially from interrupt context so the user needs to schedule their own work thread if necessary. In addition to adding callbacks, they can also be removed as otherwise this opens up a way for user-space to allocate kernel memory with no bound (by opening algif_rng descriptors and then closing them). 
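As a usage illustration (not part of this patch), a module that wants to seed itself once the nonblocking pool is ready would do roughly the following; the my_* names are hypothetical, and only struct random_ready_callback, add_random_ready_callback() and del_random_ready_callback() come from the patch:

        #include <linux/module.h>
        #include <linux/random.h>

        static u8 my_seed[16];          /* hypothetical consumer of the entropy */
        static bool my_cb_registered;

        /* May be invoked from interrupt context once the nonblocking pool
         * is initialised, so keep it short or defer to a workqueue. */
        static void my_random_ready(struct random_ready_callback *rdy)
        {
                get_random_bytes(my_seed, sizeof(my_seed));
        }

        static struct random_ready_callback my_rdy = {
                .func   = my_random_ready,
                .owner  = THIS_MODULE,
        };

        static int __init my_init(void)
        {
                int err = add_random_ready_callback(&my_rdy);

                if (!err) {
                        my_cb_registered = true;        /* callback fires later */
                        return 0;
                }
                if (err == -EALREADY) {                 /* pool already initialised */
                        my_random_ready(&my_rdy);
                        return 0;
                }
                return err;                             /* -ENOENT */
        }

        static void __exit my_exit(void)
        {
                /* Safe even if the callback has already fired: the list entry
                 * is re-initialised when it is taken off the ready list. */
                if (my_cb_registered)
                        del_random_ready_callback(&my_rdy);
        }

        module_init(my_init);
        module_exit(my_exit);
        MODULE_LICENSE("GPL");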
Signed-off-by: Herbert Xu --- drivers/char/random.c | 78 ++++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/random.h | 9 ++++++ 2 files changed, 87 insertions(+) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index 159d0700f7d8..a1576ed1d88e 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -409,6 +409,9 @@ static DECLARE_WAIT_QUEUE_HEAD(random_write_wait); static DECLARE_WAIT_QUEUE_HEAD(urandom_init_wait); static struct fasync_struct *fasync; +static DEFINE_SPINLOCK(random_ready_list_lock); +static LIST_HEAD(random_ready_list); + /********************************************************************** * * OS independent entropy store. Here are the functions which handle @@ -589,6 +592,22 @@ static void fast_mix(struct fast_pool *f) f->count++; } +static void process_random_ready_list(void) +{ + unsigned long flags; + struct random_ready_callback *rdy, *tmp; + + spin_lock_irqsave(&random_ready_list_lock, flags); + list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) { + struct module *owner = rdy->owner; + + list_del_init(&rdy->list); + rdy->func(rdy); + module_put(owner); + } + spin_unlock_irqrestore(&random_ready_list_lock, flags); +} + /* * Credit (or debit) the entropy store with n bits of entropy. * Use credit_entropy_bits_safe() if the value comes from userspace @@ -660,6 +679,7 @@ retry: r->entropy_total = 0; if (r == &nonblocking_pool) { prandom_reseed_late(); + process_random_ready_list(); wake_up_all(&urandom_init_wait); pr_notice("random: %s pool is initialized\n", r->name); } @@ -1256,6 +1276,64 @@ void get_blocking_random_bytes(void *buf, int nbytes) } EXPORT_SYMBOL(get_blocking_random_bytes); +/* + * Add a callback function that will be invoked when the nonblocking + * pool is initialised. + * + * returns: 0 if callback is successfully added + * -EALREADY if pool is already initialised (callback not called) + * -ENOENT if module for callback is not alive + */ +int add_random_ready_callback(struct random_ready_callback *rdy) +{ + struct module *owner; + unsigned long flags; + int err = -EALREADY; + + if (likely(nonblocking_pool.initialized)) + return err; + + owner = rdy->owner; + if (!try_module_get(owner)) + return -ENOENT; + + spin_lock_irqsave(&random_ready_list_lock, flags); + if (nonblocking_pool.initialized) + goto out; + + owner = NULL; + + list_add(&rdy->list, &random_ready_list); + err = 0; + +out: + spin_unlock_irqrestore(&random_ready_list_lock, flags); + + module_put(owner); + + return err; +} +EXPORT_SYMBOL(add_random_ready_callback); + +/* + * Delete a previously registered readiness callback function. + */ +void del_random_ready_callback(struct random_ready_callback *rdy) +{ + unsigned long flags; + struct module *owner = NULL; + + spin_lock_irqsave(&random_ready_list_lock, flags); + if (!list_empty(&rdy->list)) { + list_del_init(&rdy->list); + owner = rdy->owner; + } + spin_unlock_irqrestore(&random_ready_list_lock, flags); + + module_put(owner); +} +EXPORT_SYMBOL(del_random_ready_callback); + /* * This function will use the architecture-specific hardware random * number generator if it is available. 
The arch-specific hw RNG will diff --git a/include/linux/random.h b/include/linux/random.h index 796267d56901..30e2aca0b16a 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -6,8 +6,15 @@ #ifndef _LINUX_RANDOM_H #define _LINUX_RANDOM_H +#include #include +struct random_ready_callback { + struct list_head list; + void (*func)(struct random_ready_callback *rdy); + struct module *owner; +}; + extern void add_device_randomness(const void *, unsigned int); extern void add_input_randomness(unsigned int type, unsigned int code, unsigned int value); @@ -15,6 +22,8 @@ extern void add_interrupt_randomness(int irq, int irq_flags); extern void get_random_bytes(void *buf, int nbytes); extern void get_blocking_random_bytes(void *buf, int nbytes); +extern int add_random_ready_callback(struct random_ready_callback *rdy); +extern void del_random_ready_callback(struct random_ready_callback *rdy); extern void get_random_bytes_arch(void *buf, int nbytes); void generate_random_uuid(unsigned char uuid_out[16]); extern int random_int_secret_init(void); -- cgit v1.2.3 From c2719503f5e1e6213d716bb078bdad01e28ebcbf Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 9 Jun 2015 18:19:42 +0800 Subject: random: Remove kernel blocking API This patch removes the kernel blocking API as it has been completely replaced by the callback API. Signed-off-by: Herbert Xu --- drivers/char/random.c | 12 ------------ include/linux/random.h | 1 - 2 files changed, 13 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index a1576ed1d88e..d0da5d852d41 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1264,18 +1264,6 @@ void get_random_bytes(void *buf, int nbytes) } EXPORT_SYMBOL(get_random_bytes); -/* - * Equivalent function to get_random_bytes with the difference that this - * function blocks the request until the nonblocking_pool is initialized. - */ -void get_blocking_random_bytes(void *buf, int nbytes) -{ - if (unlikely(nonblocking_pool.initialized == 0)) - wait_event(urandom_init_wait, nonblocking_pool.initialized); - extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0); -} -EXPORT_SYMBOL(get_blocking_random_bytes); - /* * Add a callback function that will be invoked when the nonblocking * pool is initialised. diff --git a/include/linux/random.h b/include/linux/random.h index 30e2aca0b16a..e651874df2c9 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -21,7 +21,6 @@ extern void add_input_randomness(unsigned int type, unsigned int code, extern void add_interrupt_randomness(int irq, int irq_flags); extern void get_random_bytes(void *buf, int nbytes); -extern void get_blocking_random_bytes(void *buf, int nbytes); extern int add_random_ready_callback(struct random_ready_callback *rdy); extern void del_random_ready_callback(struct random_ready_callback *rdy); extern void get_random_bytes_arch(void *buf, int nbytes); -- cgit v1.2.3 From 72071fe43e8dad13f2a30897876d72463d46d065 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 11 Jun 2015 11:28:32 +0800 Subject: crypto: picoxcell - Include linux/sizes.h This driver uses SZ_64K so it should include linux/sizes.h rather than relying on others to pull it in for it. 
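The underlying rule is simply to include what you use; in sketch form (MY_DDT_REGION is a hypothetical name, not from the driver):

        #include <linux/sizes.h>        /* provides SZ_64K used below */

        #define MY_DDT_REGION   SZ_64K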
Signed-off-by: Herbert Xu --- drivers/crypto/picoxcell_crypto.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index eb2a0ca49eda..aabf9d4f8e2e 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include -- cgit v1.2.3 From 1a5b951f256d772a4ab758bda3a0667b788c0d2a Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 11 Jun 2015 11:28:33 +0800 Subject: crypto: picoxcell - Make use of sg_nents_for_len This patch makes use of the new sg_nents_for_len helper to replace the custom sg_count function. Signed-off-by: Herbert Xu --- drivers/crypto/picoxcell_crypto.c | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index aabf9d4f8e2e..9eb27c71cedf 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -262,18 +262,9 @@ static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx, } /* Count the number of scatterlist entries in a scatterlist. */ -static int sg_count(struct scatterlist *sg_list, int nbytes) +static inline int sg_count(struct scatterlist *sg_list, int nbytes) { - struct scatterlist *sg = sg_list; - int sg_nents = 0; - - while (nbytes > 0) { - ++sg_nents; - nbytes -= sg->length; - sg = sg_next(sg); - } - - return sg_nents; + return sg_nents_for_len(sg_list, nbytes); } static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len) -- cgit v1.2.3 From 81781e681551ed3f56c9202e9adc7ef941cba654 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 11 Jun 2015 11:28:34 +0800 Subject: crypto: picoxcell - Clamp AEAD SG list by input length Currently the driver assumes that the SG list contains exactly the number of bytes required. This assumption is incorrect. Up until now this has been harmless. However with the new AEAD interface this now breaks as the AD SG list contains more bytes than just the AD. This patch fixes this by always clamping the AD SG list by the specified AD length. Signed-off-by: Herbert Xu --- drivers/crypto/picoxcell_crypto.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index 9eb27c71cedf..c2fd860745ad 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -318,6 +318,7 @@ static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv) struct spacc_ddt *src_ddt, *dst_ddt; unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(areq)); unsigned nents = sg_count(areq->src, areq->cryptlen); + unsigned total; dma_addr_t iv_addr; struct scatterlist *cur; int i, dst_ents, src_ents, assoc_ents; @@ -361,11 +362,18 @@ static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv) * Map the associated data. For decryption we don't copy the * associated data. 
*/ + total = areq->assoclen; for_each_sg(areq->assoc, cur, assoc_ents, i) { - ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur)); + unsigned len = sg_dma_len(cur); + + if (len > total) + len = total; + + total -= len; + + ddt_set(src_ddt++, sg_dma_address(cur), len); if (req->is_encrypt) - ddt_set(dst_ddt++, sg_dma_address(cur), - sg_dma_len(cur)); + ddt_set(dst_ddt++, sg_dma_address(cur), len); } ddt_set(src_ddt++, iv_addr, ivsize); -- cgit v1.2.3 From 32be6d3e362b896c81aae7c635d44e5a91406ce2 Mon Sep 17 00:00:00 2001 From: Dan Streetman Date: Fri, 12 Jun 2015 10:58:46 -0400 Subject: crypto: nx - move include/linux/nx842.h into drivers/crypto/nx/nx-842.h Move the contents of the include/linux/nx842.h header file into the drivers/crypto/nx/nx-842.h header file. Remove the nx842.h header file and its entry in the MAINTAINERS file. The include/linux/nx842.h header originally was there because the crypto/842.c driver needed it to communicate with the nx-842 hw driver. However, that crypto compression driver was moved into the drivers/crypto/nx/ directory, and now can directly include the nx-842.h header. Nothing else needs the public include/linux/nx842.h header file, as all use of the nx-842 hardware driver will be through the "842-nx" crypto compression driver, since the direct nx-842 api is very limited in the buffer alignments and sizes that it will accept, and the crypto compression interface handles those limitations and allows any alignment and size buffers. Signed-off-by: Dan Streetman Signed-off-by: Herbert Xu --- MAINTAINERS | 1 - drivers/crypto/nx/nx-842-crypto.c | 3 ++- drivers/crypto/nx/nx-842.h | 21 ++++++++++++++++++++- include/linux/nx842.h | 24 ------------------------ 4 files changed, 22 insertions(+), 27 deletions(-) delete mode 100644 include/linux/nx842.h (limited to 'drivers') diff --git a/MAINTAINERS b/MAINTAINERS index 912c9d9f741c..fb06e1eba758 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4862,7 +4862,6 @@ IBM Power 842 compression accelerator M: Dan Streetman S: Supported F: drivers/crypto/nx/nx-842* -F: include/linux/nx842.h F: include/linux/sw842.h F: crypto/842.c F: lib/842/ diff --git a/drivers/crypto/nx/nx-842-crypto.c b/drivers/crypto/nx/nx-842-crypto.c index 2ffa1031c91f..95066f336b26 100644 --- a/drivers/crypto/nx/nx-842-crypto.c +++ b/drivers/crypto/nx/nx-842-crypto.c @@ -58,10 +58,11 @@ #include #include #include -#include #include #include +#include "nx-842.h" + /* The first 5 bits of this magic are 0x1f, which is an invalid 842 5-bit * template (see lib/842/842.h), so this magic number will never appear at * the start of a raw 842 compressed buffer. 
That is important, as any buffer diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h index 1730f4da1cf6..4dbac11c2aa5 100644 --- a/drivers/crypto/nx/nx-842.h +++ b/drivers/crypto/nx/nx-842.h @@ -4,7 +4,6 @@ #include #include -#include #include #include #include @@ -12,6 +11,12 @@ #include #include +#define __NX842_PSERIES_MEM_COMPRESS (10240) +#define __NX842_POWERNV_MEM_COMPRESS (1024) + +#define NX842_MEM_COMPRESS (max_t(unsigned int, \ + __NX842_PSERIES_MEM_COMPRESS, __NX842_POWERNV_MEM_COMPRESS)) + /* Restrictions on Data Descriptor List (DDL) and Entry (DDE) buffers * * From NX P8 workbook, sec 4.9.1 "842 details" @@ -104,6 +109,13 @@ static inline unsigned long nx842_get_pa(void *addr) #define GET_FIELD(v, m) (((v) & (m)) >> MASK_LSH(m)) #define SET_FIELD(v, m, val) (((v) & ~(m)) | (((val) << MASK_LSH(m)) & (m))) +struct nx842_constraints { + int alignment; + int multiple; + int minimum; + int maximum; +}; + struct nx842_driver { char *name; struct module *owner; @@ -124,4 +136,11 @@ void nx842_platform_driver_unset(struct nx842_driver *driver); bool nx842_platform_driver_get(void); void nx842_platform_driver_put(void); +int nx842_constraints(struct nx842_constraints *constraints); + +int nx842_compress(const unsigned char *in, unsigned int in_len, + unsigned char *out, unsigned int *out_len, void *wrkmem); +int nx842_decompress(const unsigned char *in, unsigned int in_len, + unsigned char *out, unsigned int *out_len, void *wrkmem); + #endif /* __NX_842_H__ */ diff --git a/include/linux/nx842.h b/include/linux/nx842.h deleted file mode 100644 index 4ddf68d9c0d4..000000000000 --- a/include/linux/nx842.h +++ /dev/null @@ -1,24 +0,0 @@ -#ifndef __NX842_H__ -#define __NX842_H__ - -#define __NX842_PSERIES_MEM_COMPRESS (10240) -#define __NX842_POWERNV_MEM_COMPRESS (1024) - -#define NX842_MEM_COMPRESS (max_t(unsigned int, \ - __NX842_PSERIES_MEM_COMPRESS, __NX842_POWERNV_MEM_COMPRESS)) - -struct nx842_constraints { - int alignment; - int multiple; - int minimum; - int maximum; -}; - -int nx842_constraints(struct nx842_constraints *constraints); - -int nx842_compress(const unsigned char *in, unsigned int in_len, - unsigned char *out, unsigned int *out_len, void *wrkmem); -int nx842_decompress(const unsigned char *in, unsigned int in_len, - unsigned char *out, unsigned int *out_len, void *wrkmem); - -#endif -- cgit v1.2.3 From 2c6f6eabc0bfcea0a62370038da713e3873cff31 Mon Sep 17 00:00:00 2001 From: Dan Streetman Date: Fri, 12 Jun 2015 10:58:47 -0400 Subject: crypto: nx - replace NX842_MEM_COMPRESS with function Replace the NX842_MEM_COMPRESS define with a function that returns the specific platform driver's required working memory size. The common nx-842.c driver refuses to load if there is no platform driver present, so instead of defining an approximate working memory size that's the maximum approximate size of both platform driver's size requirements, the platform driver can directly provide its specific, i.e. sizeof(struct nx842_workmem), size requirements which the 842-nx crypto compression driver will use. This saves memory by both reducing the required size of each driver to the specific sizeof() amount, as well as using the specific loaded platform driver's required amount, instead of the maximum of both. 
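Seen from a caller, the in-kernel API now sizes the scratch buffer at run time instead of compiling in a worst-case constant. A hedged sketch of such a caller, combining the declarations this series puts in nx-842.h (compress_one is a hypothetical function; real callers must also honour nx842_constraints() alignment and size limits):

        #include <linux/slab.h>
        #include "nx-842.h"

        static int compress_one(const unsigned char *in, unsigned int ilen,
                                unsigned char *out, unsigned int *olen)
        {
                void *wmem;
                int ret;

                /* Scratch size reported by whichever platform driver loaded. */
                wmem = kmalloc(nx842_workmem_size(), GFP_KERNEL);
                if (!wmem)
                        return -ENOMEM;

                ret = nx842_compress(in, ilen, out, olen, wmem);

                kfree(wmem);
                return ret;
        }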
Signed-off-by: Dan Streetman Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-842-crypto.c | 2 +- drivers/crypto/nx/nx-842-powernv.c | 11 +++++++---- drivers/crypto/nx/nx-842-pseries.c | 7 +++---- drivers/crypto/nx/nx-842.c | 11 +++++++++++ drivers/crypto/nx/nx-842.h | 9 +++------ 5 files changed, 25 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/nx/nx-842-crypto.c b/drivers/crypto/nx/nx-842-crypto.c index 95066f336b26..d53a1dcd7b4e 100644 --- a/drivers/crypto/nx/nx-842-crypto.c +++ b/drivers/crypto/nx/nx-842-crypto.c @@ -136,7 +136,7 @@ static int nx842_crypto_init(struct crypto_tfm *tfm) { struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); - ctx->wmem = kmalloc(NX842_MEM_COMPRESS, GFP_KERNEL); + ctx->wmem = kmalloc(nx842_workmem_size(), GFP_KERNEL); ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER); ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER); if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) { diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c index c388dba7da64..33b3b0abf4ae 100644 --- a/drivers/crypto/nx/nx-842-powernv.c +++ b/drivers/crypto/nx/nx-842-powernv.c @@ -367,7 +367,8 @@ static int wait_for_csb(struct nx842_workmem *wmem, * @inlen: input buffer size * @out: output buffer pointer * @outlenp: output buffer size pointer - * @workmem: working memory buffer pointer, must be at least NX842_MEM_COMPRESS + * @workmem: working memory buffer pointer, size determined by + * nx842_powernv_driver.workmem_size * @fc: function code, see CCW Function Codes in nx-842.h * * Returns: @@ -477,7 +478,8 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen, * @inlen: input buffer size * @out: output buffer pointer * @outlenp: output buffer size pointer - * @workmem: working memory buffer pointer, must be at least NX842_MEM_COMPRESS + * @workmem: working memory buffer pointer, size determined by + * nx842_powernv_driver.workmem_size * * Returns: see @nx842_powernv_function() */ @@ -504,7 +506,8 @@ static int nx842_powernv_compress(const unsigned char *in, unsigned int inlen, * @inlen: input buffer size * @out: output buffer pointer * @outlenp: output buffer size pointer - * @workmem: working memory buffer pointer, must be at least NX842_MEM_COMPRESS + * @workmem: working memory buffer pointer, size determined by + * nx842_powernv_driver.workmem_size * * Returns: see @nx842_powernv_function() */ @@ -572,6 +575,7 @@ static struct nx842_constraints nx842_powernv_constraints = { static struct nx842_driver nx842_powernv_driver = { .name = KBUILD_MODNAME, .owner = THIS_MODULE, + .workmem_size = sizeof(struct nx842_workmem), .constraints = &nx842_powernv_constraints, .compress = nx842_powernv_compress, .decompress = nx842_powernv_decompress, @@ -582,7 +586,6 @@ static __init int nx842_powernv_init(void) struct device_node *dn; /* verify workmem size/align restrictions */ - BUILD_BUG_ON(sizeof(struct nx842_workmem) > NX842_MEM_COMPRESS); BUILD_BUG_ON(WORKMEM_ALIGN % CRB_ALIGN); BUILD_BUG_ON(CRB_ALIGN % DDE_ALIGN); BUILD_BUG_ON(CRB_SIZE % DDE_ALIGN); diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c index 41bc551ccaf3..da52d8edefb3 100644 --- a/drivers/crypto/nx/nx-842-pseries.c +++ b/drivers/crypto/nx/nx-842-pseries.c @@ -284,7 +284,7 @@ static int nx842_validate_result(struct device *dev, * @out: Pointer to output buffer * @outlen: Length of output buffer * @wrkmem: ptr to buffer for working memory, size determined by - * 
NX842_MEM_COMPRESS + * nx842_pseries_driver.workmem_size * * Returns: * 0 Success, output of length @outlen stored in the buffer at @out @@ -411,7 +411,7 @@ unlock: * @out: Pointer to output buffer * @outlen: Length of output buffer * @wrkmem: ptr to buffer for working memory, size determined by - * NX842_MEM_COMPRESS + * nx842_pseries_driver.workmem_size * * Returns: * 0 Success, output of length @outlen stored in the buffer at @out @@ -963,6 +963,7 @@ static struct attribute_group nx842_attribute_group = { static struct nx842_driver nx842_pseries_driver = { .name = KBUILD_MODNAME, .owner = THIS_MODULE, + .workmem_size = sizeof(struct nx842_workmem), .constraints = &nx842_pseries_constraints, .compress = nx842_pseries_compress, .decompress = nx842_pseries_decompress, @@ -1084,8 +1085,6 @@ static int __init nx842_init(void) pr_info("Registering IBM Power 842 compression driver\n"); - BUILD_BUG_ON(sizeof(struct nx842_workmem) > NX842_MEM_COMPRESS); - if (!of_find_compatible_node(NULL, NULL, "ibm,compression")) return -ENODEV; diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c index 9f391d64c722..6e5e0d60d0c8 100644 --- a/drivers/crypto/nx/nx-842.c +++ b/drivers/crypto/nx/nx-842.c @@ -53,6 +53,17 @@ int nx842_constraints(struct nx842_constraints *c) } EXPORT_SYMBOL_GPL(nx842_constraints); +/** + * nx842_workmem_size + * + * Get the amount of working memory the driver requires. + */ +size_t nx842_workmem_size(void) +{ + return nx842_platform_driver()->workmem_size; +} +EXPORT_SYMBOL_GPL(nx842_workmem_size); + int nx842_compress(const unsigned char *in, unsigned int ilen, unsigned char *out, unsigned int *olen, void *wmem) { diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h index 4dbac11c2aa5..f6821b65b7ce 100644 --- a/drivers/crypto/nx/nx-842.h +++ b/drivers/crypto/nx/nx-842.h @@ -11,12 +11,6 @@ #include #include -#define __NX842_PSERIES_MEM_COMPRESS (10240) -#define __NX842_POWERNV_MEM_COMPRESS (1024) - -#define NX842_MEM_COMPRESS (max_t(unsigned int, \ - __NX842_PSERIES_MEM_COMPRESS, __NX842_POWERNV_MEM_COMPRESS)) - /* Restrictions on Data Descriptor List (DDL) and Entry (DDE) buffers * * From NX P8 workbook, sec 4.9.1 "842 details" @@ -119,6 +113,7 @@ struct nx842_constraints { struct nx842_driver { char *name; struct module *owner; + size_t workmem_size; struct nx842_constraints *constraints; @@ -136,6 +131,8 @@ void nx842_platform_driver_unset(struct nx842_driver *driver); bool nx842_platform_driver_get(void); void nx842_platform_driver_put(void); +size_t nx842_workmem_size(void); + int nx842_constraints(struct nx842_constraints *constraints); int nx842_compress(const unsigned char *in, unsigned int in_len, -- cgit v1.2.3 From 0903e435ba45e312a9e1f34257d6a2b87a3af2d9 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 15 Jun 2015 16:55:07 +0800 Subject: crypto: vmx - Remove duplicate PPC64 dependency The top-level CRYPTO_DEV_VMX option already depends on PPC64 so there is no need to depend on it again at CRYPTO_DEV_VMX_ENCRYPT. This patch also removes a redundant "default n". 
Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 1 - drivers/crypto/vmx/Kconfig | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 6b0579fdbbb2..92c899fbfe27 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -459,7 +459,6 @@ config CRYPTO_DEV_QCE config CRYPTO_DEV_VMX bool "Support for VMX cryptographic acceleration instructions" depends on PPC64 - default n help Support for VMX cryptographic acceleration instructions. diff --git a/drivers/crypto/vmx/Kconfig b/drivers/crypto/vmx/Kconfig index 771babf16aa0..89d8208d9851 100644 --- a/drivers/crypto/vmx/Kconfig +++ b/drivers/crypto/vmx/Kconfig @@ -1,6 +1,6 @@ config CRYPTO_DEV_VMX_ENCRYPT tristate "Encryption acceleration support on P8 CPU" - depends on PPC64 && CRYPTO_DEV_VMX + depends on CRYPTO_DEV_VMX default y help Support for VMX cryptographic acceleration instructions on Power8 CPU. -- cgit v1.2.3 From 4beb106045976b785a05e77ab5430ad04b6038b7 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 15 Jun 2015 16:55:46 +0800 Subject: crypto: vmx - Reindent to kernel style This patch reidents the vmx code-base to the kernel coding style. Signed-off-by: Herbert Xu --- drivers/crypto/vmx/aes.c | 166 ++++++++++++------------ drivers/crypto/vmx/aes_cbc.c | 236 +++++++++++++++++----------------- drivers/crypto/vmx/aes_ctr.c | 225 +++++++++++++++++---------------- drivers/crypto/vmx/aesp8-ppc.h | 15 +-- drivers/crypto/vmx/ghash.c | 278 +++++++++++++++++++++-------------------- drivers/crypto/vmx/vmx.c | 68 +++++----- 6 files changed, 506 insertions(+), 482 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c index ab300ea19434..023e5f014783 100644 --- a/drivers/crypto/vmx/aes.c +++ b/drivers/crypto/vmx/aes.c @@ -30,110 +30,112 @@ #include "aesp8-ppc.h" struct p8_aes_ctx { - struct crypto_cipher *fallback; - struct aes_key enc_key; - struct aes_key dec_key; + struct crypto_cipher *fallback; + struct aes_key enc_key; + struct aes_key dec_key; }; static int p8_aes_init(struct crypto_tfm *tfm) { - const char *alg; - struct crypto_cipher *fallback; - struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); - - if (!(alg = crypto_tfm_alg_name(tfm))) { - printk(KERN_ERR "Failed to get algorithm name.\n"); - return -ENOENT; - } - - fallback = crypto_alloc_cipher(alg, 0 ,CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(fallback)) { - printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n", - alg, PTR_ERR(fallback)); - return PTR_ERR(fallback); - } - printk(KERN_INFO "Using '%s' as fallback implementation.\n", - crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); - - crypto_cipher_set_flags(fallback, - crypto_cipher_get_flags((struct crypto_cipher *) tfm)); - ctx->fallback = fallback; - - return 0; + const char *alg; + struct crypto_cipher *fallback; + struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); + + if (!(alg = crypto_tfm_alg_name(tfm))) { + printk(KERN_ERR "Failed to get algorithm name.\n"); + return -ENOENT; + } + + fallback = crypto_alloc_cipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { + printk(KERN_ERR + "Failed to allocate transformation for '%s': %ld\n", + alg, PTR_ERR(fallback)); + return PTR_ERR(fallback); + } + printk(KERN_INFO "Using '%s' as fallback implementation.\n", + crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); + + crypto_cipher_set_flags(fallback, + crypto_cipher_get_flags((struct + crypto_cipher *) + tfm)); + ctx->fallback = fallback; + + 
return 0; } static void p8_aes_exit(struct crypto_tfm *tfm) { - struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); + struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); - if (ctx->fallback) { - crypto_free_cipher(ctx->fallback); - ctx->fallback = NULL; - } + if (ctx->fallback) { + crypto_free_cipher(ctx->fallback); + ctx->fallback = NULL; + } } static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen) + unsigned int keylen) { - int ret; - struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); - - pagefault_disable(); - enable_kernel_altivec(); - ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); - ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); - pagefault_enable(); - - ret += crypto_cipher_setkey(ctx->fallback, key, keylen); - return ret; + int ret; + struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); + + pagefault_disable(); + enable_kernel_altivec(); + ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); + ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); + pagefault_enable(); + + ret += crypto_cipher_setkey(ctx->fallback, key, keylen); + return ret; } static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { - struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); - - if (in_interrupt()) { - crypto_cipher_encrypt_one(ctx->fallback, dst, src); - } else { - pagefault_disable(); - enable_kernel_altivec(); - aes_p8_encrypt(src, dst, &ctx->enc_key); - pagefault_enable(); - } + struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); + + if (in_interrupt()) { + crypto_cipher_encrypt_one(ctx->fallback, dst, src); + } else { + pagefault_disable(); + enable_kernel_altivec(); + aes_p8_encrypt(src, dst, &ctx->enc_key); + pagefault_enable(); + } } static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { - struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); - - if (in_interrupt()) { - crypto_cipher_decrypt_one(ctx->fallback, dst, src); - } else { - pagefault_disable(); - enable_kernel_altivec(); - aes_p8_decrypt(src, dst, &ctx->dec_key); - pagefault_enable(); - } + struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); + + if (in_interrupt()) { + crypto_cipher_decrypt_one(ctx->fallback, dst, src); + } else { + pagefault_disable(); + enable_kernel_altivec(); + aes_p8_decrypt(src, dst, &ctx->dec_key); + pagefault_enable(); + } } struct crypto_alg p8_aes_alg = { - .cra_name = "aes", - .cra_driver_name = "p8_aes", - .cra_module = THIS_MODULE, - .cra_priority = 1000, - .cra_type = NULL, - .cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_NEED_FALLBACK, - .cra_alignmask = 0, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct p8_aes_ctx), - .cra_init = p8_aes_init, - .cra_exit = p8_aes_exit, - .cra_cipher = { - .cia_min_keysize = AES_MIN_KEY_SIZE, - .cia_max_keysize = AES_MAX_KEY_SIZE, - .cia_setkey = p8_aes_setkey, - .cia_encrypt = p8_aes_encrypt, - .cia_decrypt = p8_aes_decrypt, - }, + .cra_name = "aes", + .cra_driver_name = "p8_aes", + .cra_module = THIS_MODULE, + .cra_priority = 1000, + .cra_type = NULL, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_NEED_FALLBACK, + .cra_alignmask = 0, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct p8_aes_ctx), + .cra_init = p8_aes_init, + .cra_exit = p8_aes_exit, + .cra_cipher = { + .cia_min_keysize = AES_MIN_KEY_SIZE, + .cia_max_keysize = AES_MAX_KEY_SIZE, + .cia_setkey = p8_aes_setkey, + .cia_encrypt = p8_aes_encrypt, + .cia_decrypt = p8_aes_decrypt, + }, }; - diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c index 1a559b7dddb5..7120ab24d8c6 100644 
--- a/drivers/crypto/vmx/aes_cbc.c +++ b/drivers/crypto/vmx/aes_cbc.c @@ -31,154 +31,162 @@ #include "aesp8-ppc.h" struct p8_aes_cbc_ctx { - struct crypto_blkcipher *fallback; - struct aes_key enc_key; - struct aes_key dec_key; + struct crypto_blkcipher *fallback; + struct aes_key enc_key; + struct aes_key dec_key; }; static int p8_aes_cbc_init(struct crypto_tfm *tfm) { - const char *alg; - struct crypto_blkcipher *fallback; - struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); - - if (!(alg = crypto_tfm_alg_name(tfm))) { - printk(KERN_ERR "Failed to get algorithm name.\n"); - return -ENOENT; - } - - fallback = crypto_alloc_blkcipher(alg, 0 ,CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(fallback)) { - printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n", - alg, PTR_ERR(fallback)); - return PTR_ERR(fallback); - } - printk(KERN_INFO "Using '%s' as fallback implementation.\n", - crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); - - crypto_blkcipher_set_flags(fallback, - crypto_blkcipher_get_flags((struct crypto_blkcipher *) tfm)); - ctx->fallback = fallback; - - return 0; + const char *alg; + struct crypto_blkcipher *fallback; + struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); + + if (!(alg = crypto_tfm_alg_name(tfm))) { + printk(KERN_ERR "Failed to get algorithm name.\n"); + return -ENOENT; + } + + fallback = + crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { + printk(KERN_ERR + "Failed to allocate transformation for '%s': %ld\n", + alg, PTR_ERR(fallback)); + return PTR_ERR(fallback); + } + printk(KERN_INFO "Using '%s' as fallback implementation.\n", + crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); + + crypto_blkcipher_set_flags( + fallback, + crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm)); + ctx->fallback = fallback; + + return 0; } static void p8_aes_cbc_exit(struct crypto_tfm *tfm) { - struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); + struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); - if (ctx->fallback) { - crypto_free_blkcipher(ctx->fallback); - ctx->fallback = NULL; - } + if (ctx->fallback) { + crypto_free_blkcipher(ctx->fallback); + ctx->fallback = NULL; + } } static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen) + unsigned int keylen) { - int ret; - struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); + int ret; + struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); - pagefault_disable(); - enable_kernel_altivec(); - ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); - ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); - pagefault_enable(); + pagefault_disable(); + enable_kernel_altivec(); + ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); + ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); + pagefault_enable(); - ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); - return ret; + ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); + return ret; } static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) + struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) { - int ret; - struct blkcipher_walk walk; - struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx( - crypto_blkcipher_tfm(desc->tfm)); - struct blkcipher_desc fallback_desc = { - .tfm = ctx->fallback, - .info = desc->info, - .flags = desc->flags - }; - - if (in_interrupt()) { - ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes); - } else { - 
pagefault_disable(); - enable_kernel_altivec(); - - blkcipher_walk_init(&walk, dst, src, nbytes); - ret = blkcipher_walk_virt(desc, &walk); - while ((nbytes = walk.nbytes)) { - aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr, - nbytes & AES_BLOCK_MASK, &ctx->enc_key, walk.iv, 1); + int ret; + struct blkcipher_walk walk; + struct p8_aes_cbc_ctx *ctx = + crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); + struct blkcipher_desc fallback_desc = { + .tfm = ctx->fallback, + .info = desc->info, + .flags = desc->flags + }; + + if (in_interrupt()) { + ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, + nbytes); + } else { + pagefault_disable(); + enable_kernel_altivec(); + + blkcipher_walk_init(&walk, dst, src, nbytes); + ret = blkcipher_walk_virt(desc, &walk); + while ((nbytes = walk.nbytes)) { + aes_p8_cbc_encrypt(walk.src.virt.addr, + walk.dst.virt.addr, + nbytes & AES_BLOCK_MASK, + &ctx->enc_key, walk.iv, 1); nbytes &= AES_BLOCK_SIZE - 1; ret = blkcipher_walk_done(desc, &walk, nbytes); - } + } - pagefault_enable(); - } + pagefault_enable(); + } - return ret; + return ret; } static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) + struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) { - int ret; - struct blkcipher_walk walk; - struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx( - crypto_blkcipher_tfm(desc->tfm)); - struct blkcipher_desc fallback_desc = { - .tfm = ctx->fallback, - .info = desc->info, - .flags = desc->flags - }; - - if (in_interrupt()) { - ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src, nbytes); - } else { - pagefault_disable(); - enable_kernel_altivec(); - - blkcipher_walk_init(&walk, dst, src, nbytes); - ret = blkcipher_walk_virt(desc, &walk); - while ((nbytes = walk.nbytes)) { - aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr, - nbytes & AES_BLOCK_MASK, &ctx->dec_key, walk.iv, 0); + int ret; + struct blkcipher_walk walk; + struct p8_aes_cbc_ctx *ctx = + crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); + struct blkcipher_desc fallback_desc = { + .tfm = ctx->fallback, + .info = desc->info, + .flags = desc->flags + }; + + if (in_interrupt()) { + ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src, + nbytes); + } else { + pagefault_disable(); + enable_kernel_altivec(); + + blkcipher_walk_init(&walk, dst, src, nbytes); + ret = blkcipher_walk_virt(desc, &walk); + while ((nbytes = walk.nbytes)) { + aes_p8_cbc_encrypt(walk.src.virt.addr, + walk.dst.virt.addr, + nbytes & AES_BLOCK_MASK, + &ctx->dec_key, walk.iv, 0); nbytes &= AES_BLOCK_SIZE - 1; ret = blkcipher_walk_done(desc, &walk, nbytes); } - pagefault_enable(); - } + pagefault_enable(); + } - return ret; + return ret; } struct crypto_alg p8_aes_cbc_alg = { - .cra_name = "cbc(aes)", - .cra_driver_name = "p8_aes_cbc", - .cra_module = THIS_MODULE, - .cra_priority = 1000, - .cra_type = &crypto_blkcipher_type, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, - .cra_alignmask = 0, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct p8_aes_cbc_ctx), - .cra_init = p8_aes_cbc_init, - .cra_exit = p8_aes_cbc_exit, - .cra_blkcipher = { - .ivsize = 0, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .setkey = p8_aes_cbc_setkey, - .encrypt = p8_aes_cbc_encrypt, - .decrypt = p8_aes_cbc_decrypt, - }, + .cra_name = "cbc(aes)", + .cra_driver_name = "p8_aes_cbc", + .cra_module = THIS_MODULE, + .cra_priority = 1000, + .cra_type = &crypto_blkcipher_type, + .cra_flags 
= CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, + .cra_alignmask = 0, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct p8_aes_cbc_ctx), + .cra_init = p8_aes_cbc_init, + .cra_exit = p8_aes_cbc_exit, + .cra_blkcipher = { + .ivsize = 0, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = p8_aes_cbc_setkey, + .encrypt = p8_aes_cbc_encrypt, + .decrypt = p8_aes_cbc_decrypt, + }, }; - diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c index 96dbee4bf4a6..7adae42a7b79 100644 --- a/drivers/crypto/vmx/aes_ctr.c +++ b/drivers/crypto/vmx/aes_ctr.c @@ -30,138 +30,147 @@ #include "aesp8-ppc.h" struct p8_aes_ctr_ctx { - struct crypto_blkcipher *fallback; - struct aes_key enc_key; + struct crypto_blkcipher *fallback; + struct aes_key enc_key; }; static int p8_aes_ctr_init(struct crypto_tfm *tfm) { - const char *alg; - struct crypto_blkcipher *fallback; - struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); - - if (!(alg = crypto_tfm_alg_name(tfm))) { - printk(KERN_ERR "Failed to get algorithm name.\n"); - return -ENOENT; - } - - fallback = crypto_alloc_blkcipher(alg, 0 ,CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(fallback)) { - printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n", - alg, PTR_ERR(fallback)); - return PTR_ERR(fallback); - } - printk(KERN_INFO "Using '%s' as fallback implementation.\n", - crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); - - crypto_blkcipher_set_flags(fallback, - crypto_blkcipher_get_flags((struct crypto_blkcipher *) tfm)); - ctx->fallback = fallback; - - return 0; + const char *alg; + struct crypto_blkcipher *fallback; + struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); + + if (!(alg = crypto_tfm_alg_name(tfm))) { + printk(KERN_ERR "Failed to get algorithm name.\n"); + return -ENOENT; + } + + fallback = + crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { + printk(KERN_ERR + "Failed to allocate transformation for '%s': %ld\n", + alg, PTR_ERR(fallback)); + return PTR_ERR(fallback); + } + printk(KERN_INFO "Using '%s' as fallback implementation.\n", + crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); + + crypto_blkcipher_set_flags( + fallback, + crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm)); + ctx->fallback = fallback; + + return 0; } static void p8_aes_ctr_exit(struct crypto_tfm *tfm) { - struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); + struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); - if (ctx->fallback) { - crypto_free_blkcipher(ctx->fallback); - ctx->fallback = NULL; - } + if (ctx->fallback) { + crypto_free_blkcipher(ctx->fallback); + ctx->fallback = NULL; + } } static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen) + unsigned int keylen) { - int ret; - struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); + int ret; + struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); - pagefault_disable(); - enable_kernel_altivec(); - ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); - pagefault_enable(); + pagefault_disable(); + enable_kernel_altivec(); + ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); + pagefault_enable(); - ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); - return ret; + ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); + return ret; } static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx, - struct blkcipher_walk *walk) + struct blkcipher_walk *walk) { - u8 *ctrblk = walk->iv; - u8 keystream[AES_BLOCK_SIZE]; - u8 *src = 
walk->src.virt.addr; - u8 *dst = walk->dst.virt.addr; - unsigned int nbytes = walk->nbytes; - - pagefault_disable(); - enable_kernel_altivec(); - aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key); - pagefault_enable(); - - crypto_xor(keystream, src, nbytes); - memcpy(dst, keystream, nbytes); - crypto_inc(ctrblk, AES_BLOCK_SIZE); + u8 *ctrblk = walk->iv; + u8 keystream[AES_BLOCK_SIZE]; + u8 *src = walk->src.virt.addr; + u8 *dst = walk->dst.virt.addr; + unsigned int nbytes = walk->nbytes; + + pagefault_disable(); + enable_kernel_altivec(); + aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key); + pagefault_enable(); + + crypto_xor(keystream, src, nbytes); + memcpy(dst, keystream, nbytes); + crypto_inc(ctrblk, AES_BLOCK_SIZE); } static int p8_aes_ctr_crypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) + struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) { - int ret; - struct blkcipher_walk walk; - struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx( - crypto_blkcipher_tfm(desc->tfm)); - struct blkcipher_desc fallback_desc = { - .tfm = ctx->fallback, - .info = desc->info, - .flags = desc->flags - }; - - if (in_interrupt()) { - ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes); - } else { - blkcipher_walk_init(&walk, dst, src, nbytes); - ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); - while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { - pagefault_disable(); - enable_kernel_altivec(); - aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, walk.dst.virt.addr, - (nbytes & AES_BLOCK_MASK)/AES_BLOCK_SIZE, &ctx->enc_key, walk.iv); - pagefault_enable(); - - crypto_inc(walk.iv, AES_BLOCK_SIZE); - nbytes &= AES_BLOCK_SIZE - 1; - ret = blkcipher_walk_done(desc, &walk, nbytes); - } - if (walk.nbytes) { - p8_aes_ctr_final(ctx, &walk); - ret = blkcipher_walk_done(desc, &walk, 0); - } - } - - return ret; + int ret; + struct blkcipher_walk walk; + struct p8_aes_ctr_ctx *ctx = + crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); + struct blkcipher_desc fallback_desc = { + .tfm = ctx->fallback, + .info = desc->info, + .flags = desc->flags + }; + + if (in_interrupt()) { + ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, + nbytes); + } else { + blkcipher_walk_init(&walk, dst, src, nbytes); + ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); + while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { + pagefault_disable(); + enable_kernel_altivec(); + aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, + walk.dst.virt.addr, + (nbytes & + AES_BLOCK_MASK) / + AES_BLOCK_SIZE, + &ctx->enc_key, + walk.iv); + pagefault_enable(); + + crypto_inc(walk.iv, AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + ret = blkcipher_walk_done(desc, &walk, nbytes); + } + if (walk.nbytes) { + p8_aes_ctr_final(ctx, &walk); + ret = blkcipher_walk_done(desc, &walk, 0); + } + } + + return ret; } struct crypto_alg p8_aes_ctr_alg = { - .cra_name = "ctr(aes)", - .cra_driver_name = "p8_aes_ctr", - .cra_module = THIS_MODULE, - .cra_priority = 1000, - .cra_type = &crypto_blkcipher_type, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, - .cra_alignmask = 0, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct p8_aes_ctr_ctx), - .cra_init = p8_aes_ctr_init, - .cra_exit = p8_aes_ctr_exit, - .cra_blkcipher = { - .ivsize = 0, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .setkey = p8_aes_ctr_setkey, - .encrypt = p8_aes_ctr_crypt, - .decrypt = p8_aes_ctr_crypt, - }, + .cra_name = "ctr(aes)", + 
.cra_driver_name = "p8_aes_ctr", + .cra_module = THIS_MODULE, + .cra_priority = 1000, + .cra_type = &crypto_blkcipher_type, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, + .cra_alignmask = 0, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct p8_aes_ctr_ctx), + .cra_init = p8_aes_ctr_init, + .cra_exit = p8_aes_ctr_exit, + .cra_blkcipher = { + .ivsize = 0, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = p8_aes_ctr_setkey, + .encrypt = p8_aes_ctr_crypt, + .decrypt = p8_aes_ctr_crypt, + }, }; diff --git a/drivers/crypto/vmx/aesp8-ppc.h b/drivers/crypto/vmx/aesp8-ppc.h index e963945a83e1..4cd34ee54a94 100644 --- a/drivers/crypto/vmx/aesp8-ppc.h +++ b/drivers/crypto/vmx/aesp8-ppc.h @@ -4,17 +4,18 @@ #define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1)) struct aes_key { - u8 key[AES_MAX_KEYLENGTH]; - int rounds; + u8 key[AES_MAX_KEYLENGTH]; + int rounds; }; int aes_p8_set_encrypt_key(const u8 *userKey, const int bits, - struct aes_key *key); + struct aes_key *key); int aes_p8_set_decrypt_key(const u8 *userKey, const int bits, - struct aes_key *key); + struct aes_key *key); void aes_p8_encrypt(const u8 *in, u8 *out, const struct aes_key *key); -void aes_p8_decrypt(const u8 *in, u8 *out,const struct aes_key *key); +void aes_p8_decrypt(const u8 *in, u8 *out, const struct aes_key *key); void aes_p8_cbc_encrypt(const u8 *in, u8 *out, size_t len, - const struct aes_key *key, u8 *iv, const int enc); + const struct aes_key *key, u8 *iv, const int enc); void aes_p8_ctr32_encrypt_blocks(const u8 *in, u8 *out, - size_t len, const struct aes_key *key, const u8 *iv); + size_t len, const struct aes_key *key, + const u8 *iv); diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c index d0ffe277af5c..4c3a8f7e5059 100644 --- a/drivers/crypto/vmx/ghash.c +++ b/drivers/crypto/vmx/ghash.c @@ -39,176 +39,180 @@ void gcm_init_p8(u128 htable[16], const u64 Xi[2]); void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]); void gcm_ghash_p8(u64 Xi[2], const u128 htable[16], - const u8 *in,size_t len); + const u8 *in, size_t len); struct p8_ghash_ctx { - u128 htable[16]; - struct crypto_shash *fallback; + u128 htable[16]; + struct crypto_shash *fallback; }; struct p8_ghash_desc_ctx { - u64 shash[2]; - u8 buffer[GHASH_DIGEST_SIZE]; - int bytes; - struct shash_desc fallback_desc; + u64 shash[2]; + u8 buffer[GHASH_DIGEST_SIZE]; + int bytes; + struct shash_desc fallback_desc; }; static int p8_ghash_init_tfm(struct crypto_tfm *tfm) { - const char *alg; - struct crypto_shash *fallback; - struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm); - struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); - - if (!(alg = crypto_tfm_alg_name(tfm))) { - printk(KERN_ERR "Failed to get algorithm name.\n"); - return -ENOENT; - } - - fallback = crypto_alloc_shash(alg, 0 ,CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(fallback)) { - printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n", - alg, PTR_ERR(fallback)); - return PTR_ERR(fallback); - } - printk(KERN_INFO "Using '%s' as fallback implementation.\n", - crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback))); - - crypto_shash_set_flags(fallback, - crypto_shash_get_flags((struct crypto_shash *) tfm)); - ctx->fallback = fallback; - - shash_tfm->descsize = sizeof(struct p8_ghash_desc_ctx) - + crypto_shash_descsize(fallback); - - return 0; + const char *alg; + struct crypto_shash *fallback; + struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm); + struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); + + if (!(alg = 
crypto_tfm_alg_name(tfm))) { + printk(KERN_ERR "Failed to get algorithm name.\n"); + return -ENOENT; + } + + fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { + printk(KERN_ERR + "Failed to allocate transformation for '%s': %ld\n", + alg, PTR_ERR(fallback)); + return PTR_ERR(fallback); + } + printk(KERN_INFO "Using '%s' as fallback implementation.\n", + crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback))); + + crypto_shash_set_flags(fallback, + crypto_shash_get_flags((struct crypto_shash + *) tfm)); + ctx->fallback = fallback; + + shash_tfm->descsize = sizeof(struct p8_ghash_desc_ctx) + + crypto_shash_descsize(fallback); + + return 0; } static void p8_ghash_exit_tfm(struct crypto_tfm *tfm) { - struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); + struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); - if (ctx->fallback) { - crypto_free_shash(ctx->fallback); - ctx->fallback = NULL; - } + if (ctx->fallback) { + crypto_free_shash(ctx->fallback); + ctx->fallback = NULL; + } } static int p8_ghash_init(struct shash_desc *desc) { - struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); - struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); - - dctx->bytes = 0; - memset(dctx->shash, 0, GHASH_DIGEST_SIZE); - dctx->fallback_desc.tfm = ctx->fallback; - dctx->fallback_desc.flags = desc->flags; - return crypto_shash_init(&dctx->fallback_desc); + struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); + struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); + + dctx->bytes = 0; + memset(dctx->shash, 0, GHASH_DIGEST_SIZE); + dctx->fallback_desc.tfm = ctx->fallback; + dctx->fallback_desc.flags = desc->flags; + return crypto_shash_init(&dctx->fallback_desc); } static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key, - unsigned int keylen) + unsigned int keylen) { - struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm)); + struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm)); - if (keylen != GHASH_KEY_LEN) - return -EINVAL; + if (keylen != GHASH_KEY_LEN) + return -EINVAL; - pagefault_disable(); - enable_kernel_altivec(); - enable_kernel_fp(); - gcm_init_p8(ctx->htable, (const u64 *) key); - pagefault_enable(); - return crypto_shash_setkey(ctx->fallback, key, keylen); + pagefault_disable(); + enable_kernel_altivec(); + enable_kernel_fp(); + gcm_init_p8(ctx->htable, (const u64 *) key); + pagefault_enable(); + return crypto_shash_setkey(ctx->fallback, key, keylen); } static int p8_ghash_update(struct shash_desc *desc, - const u8 *src, unsigned int srclen) + const u8 *src, unsigned int srclen) { - unsigned int len; - struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); - struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); - - if (IN_INTERRUPT) { - return crypto_shash_update(&dctx->fallback_desc, src, srclen); - } else { - if (dctx->bytes) { - if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) { - memcpy(dctx->buffer + dctx->bytes, src, srclen); - dctx->bytes += srclen; - return 0; - } - memcpy(dctx->buffer + dctx->bytes, src, - GHASH_DIGEST_SIZE - dctx->bytes); - pagefault_disable(); - enable_kernel_altivec(); - enable_kernel_fp(); - gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer, - GHASH_DIGEST_SIZE); - pagefault_enable(); - src += GHASH_DIGEST_SIZE - dctx->bytes; - srclen -= GHASH_DIGEST_SIZE - dctx->bytes; - dctx->bytes = 0; - } - len = srclen & ~(GHASH_DIGEST_SIZE - 1); - if (len) { - pagefault_disable(); - enable_kernel_altivec(); - enable_kernel_fp(); - gcm_ghash_p8(dctx->shash, 
ctx->htable, src, len); - pagefault_enable(); - src += len; - srclen -= len; - } - if (srclen) { - memcpy(dctx->buffer, src, srclen); - dctx->bytes = srclen; - } - return 0; - } + unsigned int len; + struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); + struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); + + if (IN_INTERRUPT) { + return crypto_shash_update(&dctx->fallback_desc, src, + srclen); + } else { + if (dctx->bytes) { + if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) { + memcpy(dctx->buffer + dctx->bytes, src, + srclen); + dctx->bytes += srclen; + return 0; + } + memcpy(dctx->buffer + dctx->bytes, src, + GHASH_DIGEST_SIZE - dctx->bytes); + pagefault_disable(); + enable_kernel_altivec(); + enable_kernel_fp(); + gcm_ghash_p8(dctx->shash, ctx->htable, + dctx->buffer, GHASH_DIGEST_SIZE); + pagefault_enable(); + src += GHASH_DIGEST_SIZE - dctx->bytes; + srclen -= GHASH_DIGEST_SIZE - dctx->bytes; + dctx->bytes = 0; + } + len = srclen & ~(GHASH_DIGEST_SIZE - 1); + if (len) { + pagefault_disable(); + enable_kernel_altivec(); + enable_kernel_fp(); + gcm_ghash_p8(dctx->shash, ctx->htable, src, len); + pagefault_enable(); + src += len; + srclen -= len; + } + if (srclen) { + memcpy(dctx->buffer, src, srclen); + dctx->bytes = srclen; + } + return 0; + } } static int p8_ghash_final(struct shash_desc *desc, u8 *out) { - int i; - struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); - struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); - - if (IN_INTERRUPT) { - return crypto_shash_final(&dctx->fallback_desc, out); - } else { - if (dctx->bytes) { - for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++) - dctx->buffer[i] = 0; - pagefault_disable(); - enable_kernel_altivec(); - enable_kernel_fp(); - gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer, - GHASH_DIGEST_SIZE); - pagefault_enable(); - dctx->bytes = 0; - } - memcpy(out, dctx->shash, GHASH_DIGEST_SIZE); - return 0; - } + int i; + struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); + struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); + + if (IN_INTERRUPT) { + return crypto_shash_final(&dctx->fallback_desc, out); + } else { + if (dctx->bytes) { + for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++) + dctx->buffer[i] = 0; + pagefault_disable(); + enable_kernel_altivec(); + enable_kernel_fp(); + gcm_ghash_p8(dctx->shash, ctx->htable, + dctx->buffer, GHASH_DIGEST_SIZE); + pagefault_enable(); + dctx->bytes = 0; + } + memcpy(out, dctx->shash, GHASH_DIGEST_SIZE); + return 0; + } } struct shash_alg p8_ghash_alg = { - .digestsize = GHASH_DIGEST_SIZE, - .init = p8_ghash_init, - .update = p8_ghash_update, - .final = p8_ghash_final, - .setkey = p8_ghash_setkey, - .descsize = sizeof(struct p8_ghash_desc_ctx), - .base = { - .cra_name = "ghash", - .cra_driver_name = "p8_ghash", - .cra_priority = 1000, - .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_NEED_FALLBACK, - .cra_blocksize = GHASH_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct p8_ghash_ctx), - .cra_module = THIS_MODULE, - .cra_init = p8_ghash_init_tfm, - .cra_exit = p8_ghash_exit_tfm, - }, + .digestsize = GHASH_DIGEST_SIZE, + .init = p8_ghash_init, + .update = p8_ghash_update, + .final = p8_ghash_final, + .setkey = p8_ghash_setkey, + .descsize = sizeof(struct p8_ghash_desc_ctx), + .base = { + .cra_name = "ghash", + .cra_driver_name = "p8_ghash", + .cra_priority = 1000, + .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = GHASH_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct p8_ghash_ctx), + .cra_module = THIS_MODULE, + 
.cra_init = p8_ghash_init_tfm, + .cra_exit = p8_ghash_exit_tfm, + }, }; diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c index 4c398ddd8c10..e163d5770438 100644 --- a/drivers/crypto/vmx/vmx.c +++ b/drivers/crypto/vmx/vmx.c @@ -32,57 +32,57 @@ extern struct crypto_alg p8_aes_alg; extern struct crypto_alg p8_aes_cbc_alg; extern struct crypto_alg p8_aes_ctr_alg; static struct crypto_alg *algs[] = { - &p8_aes_alg, - &p8_aes_cbc_alg, - &p8_aes_ctr_alg, - NULL, + &p8_aes_alg, + &p8_aes_cbc_alg, + &p8_aes_ctr_alg, + NULL, }; int __init p8_init(void) { - int ret = 0; - struct crypto_alg **alg_it; + int ret = 0; + struct crypto_alg **alg_it; - if (!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_VEC_CRYPTO)) - return -ENODEV; + if (!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_VEC_CRYPTO)) + return -ENODEV; - for (alg_it = algs; *alg_it; alg_it++) { - ret = crypto_register_alg(*alg_it); - printk(KERN_INFO "crypto_register_alg '%s' = %d\n", - (*alg_it)->cra_name, ret); - if (ret) { - for (alg_it--; alg_it >= algs; alg_it--) - crypto_unregister_alg(*alg_it); - break; - } - } - if (ret) - return ret; + for (alg_it = algs; *alg_it; alg_it++) { + ret = crypto_register_alg(*alg_it); + printk(KERN_INFO "crypto_register_alg '%s' = %d\n", + (*alg_it)->cra_name, ret); + if (ret) { + for (alg_it--; alg_it >= algs; alg_it--) + crypto_unregister_alg(*alg_it); + break; + } + } + if (ret) + return ret; - ret = crypto_register_shash(&p8_ghash_alg); - if (ret) { - for (alg_it = algs; *alg_it; alg_it++) - crypto_unregister_alg(*alg_it); - } - return ret; + ret = crypto_register_shash(&p8_ghash_alg); + if (ret) { + for (alg_it = algs; *alg_it; alg_it++) + crypto_unregister_alg(*alg_it); + } + return ret; } void __exit p8_exit(void) { - struct crypto_alg **alg_it; + struct crypto_alg **alg_it; - for (alg_it = algs; *alg_it; alg_it++) { - printk(KERN_INFO "Removing '%s'\n", (*alg_it)->cra_name); - crypto_unregister_alg(*alg_it); - } - crypto_unregister_shash(&p8_ghash_alg); + for (alg_it = algs; *alg_it; alg_it++) { + printk(KERN_INFO "Removing '%s'\n", (*alg_it)->cra_name); + crypto_unregister_alg(*alg_it); + } + crypto_unregister_shash(&p8_ghash_alg); } module_init(p8_init); module_exit(p8_exit); MODULE_AUTHOR("Marcelo Cerri"); -MODULE_DESCRIPTION("IBM VMX cryptographic acceleration instructions support on Power 8"); +MODULE_DESCRIPTION("IBM VMX cryptographic acceleration instructions " + "support on Power 8"); MODULE_LICENSE("GPL"); MODULE_VERSION("1.0.0"); - -- cgit v1.2.3 From 8af7b0f809a3d34657433ec545d64dff9808ce34 Mon Sep 17 00:00:00 2001 From: Victoria Milhoan Date: Mon, 15 Jun 2015 16:52:57 -0700 Subject: crypto: caam - Fix incorrect size when DMA unmapping buffer The CAAM driver uses two data buffers to store data for a hashing operation, with one buffer defined as active. This change forces switching of the active buffer when executing a hashing operation to avoid a later DMA unmap using the length of the opposite buffer. 
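A loose standalone model of the ping-pong bookkeeping involved (plain C, not CAAM driver code; all names are invented for illustration): the length used when a buffer is later unmapped must refer to the slot that was actually mapped, which is only guaranteed if the active-buffer index is flipped once per operation on every path.

#include <stdio.h>

/* hypothetical two-slot state, loosely modelled on the driver's
 * buf_0/buf_1, buflen_0/buflen_1 and current_buf fields */
struct hash_state {
	int buflen[2];
	int current_buf;	/* slot the next request will consume */
};

int main(void)
{
	struct hash_state st = { .buflen = { 16, 48 }, .current_buf = 0 };

	/* request N: "DMA-map" slot 0, telling the device 16 bytes */
	int mapped_slot = st.current_buf;
	int mapped_len = st.buflen[mapped_slot];

	/*
	 * Flip the index once per request, on every path, so map-time and
	 * unmap-time bookkeeping stay in step: the next request will fill
	 * slot 1, while this request's unmap still refers to the
	 * mapped_slot/mapped_len captured above rather than to the other
	 * slot's 48 bytes.
	 */
	st.current_buf = !st.current_buf;

	printf("request used slot %d (%d bytes); next request fills slot %d\n",
	       mapped_slot, mapped_len, st.current_buf);
	return 0;
}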
Signed-off-by: Victoria Milhoan Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index ba0532efd3ae..75125a26bd2a 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -835,17 +835,17 @@ static int ahash_update_ctx(struct ahash_request *req) src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + sec4_sg_src_index, chained); - if (*next_buflen) { + if (*next_buflen) scatterwalk_map_and_copy(next_buf, req->src, to_hash - *buflen, *next_buflen, 0); - state->current_buf = !state->current_buf; - } } else { (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN; } + state->current_buf = !state->current_buf; + sh_len = desc_len(sh_desc); desc = edesc->hw_desc; init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | @@ -1268,9 +1268,10 @@ static int ahash_update_no_ctx(struct ahash_request *req) scatterwalk_map_and_copy(next_buf, req->src, to_hash - *buflen, *next_buflen, 0); - state->current_buf = !state->current_buf; } + state->current_buf = !state->current_buf; + sh_len = desc_len(sh_desc); desc = edesc->hw_desc; init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | -- cgit v1.2.3 From f4ec6aa5b018e0ec7edea916644686d572bc595c Mon Sep 17 00:00:00 2001 From: Victoria Milhoan Date: Mon, 15 Jun 2015 16:52:58 -0700 Subject: crypto: caam - Provide correct value to iounmap() in controller driver Fix a "Trying to vfree() nonexistent vm area" error when unloading the CAAM controller module by providing the correct pointer value to iounmap(). Signed-off-by: Victoria Milhoan Signed-off-by: Herbert Xu --- drivers/crypto/caam/ctrl.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index efba4ccd4fac..efacab7539ef 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -301,7 +301,7 @@ static int caam_remove(struct platform_device *pdev) #endif /* Unmap controller region */ - iounmap(&ctrl); + iounmap(ctrl); return ret; } @@ -496,7 +496,7 @@ static int caam_probe(struct platform_device *pdev) sizeof(struct platform_device *) * rspec, GFP_KERNEL); if (ctrlpriv->jrpdev == NULL) { - iounmap(&ctrl); + iounmap(ctrl); return -ENOMEM; } -- cgit v1.2.3 From 201f28f055917300c3301b19d0e0e674b9cf8cb7 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 16 Jun 2015 13:54:21 +0800 Subject: crypto: nx - Convert GCM to new AEAD interface This patch converts the nx GCM implementations to the new AEAD interface. This is compile-tested only. 
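A hedged caller-side sketch of the AEAD layout being converted to (an illustrative function, not part of the patch): the associated data now sits at the front of the source/destination scatterlist and its length is passed with aead_request_set_ad(), which is why the hunks below walk req->src instead of req->assoc and offset by req->assoclen. A synchronous backend is assumed and error handling is abbreviated.

#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* buf layout: AAD (assoclen) || plaintext (cryptlen) || room for 16-byte tag */
static int example_gcm_encrypt(u8 *buf, unsigned int assoclen,
			       unsigned int cryptlen,
			       const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* one scatterlist covers AAD, text and tag; AAD length goes via set_ad */
	sg_init_one(&sg, buf, assoclen + cryptlen + 16);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, &sg, &sg, cryptlen, iv);

	/* assumes a synchronous implementation; async ones return -EINPROGRESS */
	err = crypto_aead_encrypt(req);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}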
Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-aes-gcm.c | 107 ++++++++++++++++++----------------------- drivers/crypto/nx/nx.c | 26 ++++++---- drivers/crypto/nx/nx.h | 7 +-- 3 files changed, 67 insertions(+), 73 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c index e4e64f688158..08ac6d48688c 100644 --- a/drivers/crypto/nx/nx-aes-gcm.c +++ b/drivers/crypto/nx/nx-aes-gcm.c @@ -93,14 +93,6 @@ out: return rc; } -static int gcm_aes_nx_setauthsize(struct crypto_aead *tfm, - unsigned int authsize) -{ - crypto_aead_crt(tfm)->authsize = authsize; - - return 0; -} - static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { @@ -113,8 +105,6 @@ static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm, return -EINVAL; } - crypto_aead_crt(tfm)->authsize = authsize; - return 0; } @@ -131,7 +121,7 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx, unsigned int max_sg_len; if (nbytes <= AES_BLOCK_SIZE) { - scatterwalk_start(&walk, req->assoc); + scatterwalk_start(&walk, req->src); scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG); scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0); return 0; @@ -156,7 +146,7 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx, NX_PAGE_SIZE * (max_sg_len - 1)); nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len, - req->assoc, processed, &to_process); + req->src, processed, &to_process); if ((to_process + processed) < nbytes) NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE; @@ -222,7 +212,7 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc) NX_PAGE_SIZE * (max_sg_len - 1)); nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len, - req->assoc, processed, &to_process); + req->src, processed, &to_process); if ((to_process + processed) < nbytes) NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; @@ -374,7 +364,8 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc) csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8; desc.tfm = (struct crypto_blkcipher *) req->base.tfm; rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, - req->src, &to_process, processed, + req->src, &to_process, + processed + req->assoclen, csbcpb->cpb.aes_gcm.iv_or_cnt); if (rc) @@ -409,17 +400,19 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc) mac: if (enc) { /* copy out the auth tag */ - scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac, - req->dst, nbytes, - crypto_aead_authsize(crypto_aead_reqtfm(req)), - SCATTERWALK_TO_SG); + scatterwalk_map_and_copy( + csbcpb->cpb.aes_gcm.out_pat_or_mac, + req->dst, req->assoclen + nbytes, + crypto_aead_authsize(crypto_aead_reqtfm(req)), + SCATTERWALK_TO_SG); } else { u8 *itag = nx_ctx->priv.gcm.iauth_tag; u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac; - scatterwalk_map_and_copy(itag, req->src, nbytes, - crypto_aead_authsize(crypto_aead_reqtfm(req)), - SCATTERWALK_FROM_SG); + scatterwalk_map_and_copy( + itag, req->src, req->assoclen + nbytes, + crypto_aead_authsize(crypto_aead_reqtfm(req)), + SCATTERWALK_FROM_SG); rc = memcmp(itag, otag, crypto_aead_authsize(crypto_aead_reqtfm(req))) ? -EBADMSG : 0; @@ -478,45 +471,39 @@ static int gcm4106_aes_nx_decrypt(struct aead_request *req) * during encrypt/decrypt doesn't solve this problem, because it calls * blkcipher_walk_done under the covers, which doesn't use walk->blocksize, * but instead uses this tfm->blocksize. 
*/ -struct crypto_alg nx_gcm_aes_alg = { - .cra_name = "gcm(aes)", - .cra_driver_name = "gcm-aes-nx", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_AEAD, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct nx_crypto_ctx), - .cra_type = &crypto_aead_type, - .cra_module = THIS_MODULE, - .cra_init = nx_crypto_ctx_aes_gcm_init, - .cra_exit = nx_crypto_ctx_exit, - .cra_aead = { - .ivsize = AES_BLOCK_SIZE, - .maxauthsize = AES_BLOCK_SIZE, - .setkey = gcm_aes_nx_set_key, - .setauthsize = gcm_aes_nx_setauthsize, - .encrypt = gcm_aes_nx_encrypt, - .decrypt = gcm_aes_nx_decrypt, - } +struct aead_alg nx_gcm_aes_alg = { + .base = { + .cra_name = "gcm(aes)", + .cra_driver_name = "gcm-aes-nx", + .cra_priority = 300, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct nx_crypto_ctx), + .cra_module = THIS_MODULE, + }, + .init = nx_crypto_ctx_aes_gcm_init, + .exit = nx_crypto_ctx_aead_exit, + .ivsize = 12, + .maxauthsize = AES_BLOCK_SIZE, + .setkey = gcm_aes_nx_set_key, + .encrypt = gcm_aes_nx_encrypt, + .decrypt = gcm_aes_nx_decrypt, }; -struct crypto_alg nx_gcm4106_aes_alg = { - .cra_name = "rfc4106(gcm(aes))", - .cra_driver_name = "rfc4106-gcm-aes-nx", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_AEAD, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct nx_crypto_ctx), - .cra_type = &crypto_nivaead_type, - .cra_module = THIS_MODULE, - .cra_init = nx_crypto_ctx_aes_gcm_init, - .cra_exit = nx_crypto_ctx_exit, - .cra_aead = { - .ivsize = 8, - .maxauthsize = AES_BLOCK_SIZE, - .geniv = "seqiv", - .setkey = gcm4106_aes_nx_set_key, - .setauthsize = gcm4106_aes_nx_setauthsize, - .encrypt = gcm4106_aes_nx_encrypt, - .decrypt = gcm4106_aes_nx_decrypt, - } +struct aead_alg nx_gcm4106_aes_alg = { + .base = { + .cra_name = "rfc4106(gcm(aes))", + .cra_driver_name = "rfc4106-gcm-aes-nx", + .cra_priority = 300, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct nx_crypto_ctx), + .cra_module = THIS_MODULE, + }, + .init = nx_crypto_ctx_aes_gcm_init, + .exit = nx_crypto_ctx_aead_exit, + .ivsize = 8, + .maxauthsize = AES_BLOCK_SIZE, + .setkey = gcm4106_aes_nx_set_key, + .setauthsize = gcm4106_aes_nx_setauthsize, + .encrypt = gcm4106_aes_nx_encrypt, + .decrypt = gcm4106_aes_nx_decrypt, }; diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index 2e2529ce8d31..847350534cc1 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c @@ -19,8 +19,8 @@ * Author: Kent Yoder */ +#include #include -#include #include #include #include @@ -29,7 +29,6 @@ #include #include #include -#include #include #include #include @@ -531,11 +530,11 @@ static int nx_register_algs(void) if (rc) goto out_unreg_ctr; - rc = crypto_register_alg(&nx_gcm_aes_alg); + rc = crypto_register_aead(&nx_gcm_aes_alg); if (rc) goto out_unreg_ctr3686; - rc = crypto_register_alg(&nx_gcm4106_aes_alg); + rc = crypto_register_aead(&nx_gcm4106_aes_alg); if (rc) goto out_unreg_gcm; @@ -570,9 +569,9 @@ out_unreg_ccm4309: out_unreg_ccm: crypto_unregister_alg(&nx_ccm_aes_alg); out_unreg_gcm4106: - crypto_unregister_alg(&nx_gcm4106_aes_alg); + crypto_unregister_aead(&nx_gcm4106_aes_alg); out_unreg_gcm: - crypto_unregister_alg(&nx_gcm_aes_alg); + crypto_unregister_aead(&nx_gcm_aes_alg); out_unreg_ctr3686: crypto_unregister_alg(&nx_ctr3686_aes_alg); out_unreg_ctr: @@ -639,9 +638,9 @@ int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm) NX_MODE_AES_CCM); } -int nx_crypto_ctx_aes_gcm_init(struct crypto_tfm *tfm) +int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm) { - return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES, + return 
nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES, NX_MODE_AES_GCM); } @@ -693,6 +692,13 @@ void nx_crypto_ctx_exit(struct crypto_tfm *tfm) nx_ctx->out_sg = NULL; } +void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm) +{ + struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm); + + kzfree(nx_ctx->kmem); +} + static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id) { dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n", @@ -721,8 +727,8 @@ static int nx_remove(struct vio_dev *viodev) crypto_unregister_alg(&nx_ccm_aes_alg); crypto_unregister_alg(&nx_ccm4309_aes_alg); - crypto_unregister_alg(&nx_gcm_aes_alg); - crypto_unregister_alg(&nx_gcm4106_aes_alg); + crypto_unregister_aead(&nx_gcm_aes_alg); + crypto_unregister_aead(&nx_gcm4106_aes_alg); crypto_unregister_alg(&nx_ctr_aes_alg); crypto_unregister_alg(&nx_ctr3686_aes_alg); crypto_unregister_alg(&nx_cbc_aes_alg); diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h index 41b87ee03fe2..de3ea8738146 100644 --- a/drivers/crypto/nx/nx.h +++ b/drivers/crypto/nx/nx.h @@ -143,13 +143,14 @@ struct nx_crypto_ctx { /* prototypes */ int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm); -int nx_crypto_ctx_aes_gcm_init(struct crypto_tfm *tfm); +int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm); int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm); int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm); int nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm); int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm); int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm); void nx_crypto_ctx_exit(struct crypto_tfm *tfm); +void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm); void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function); int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op, u32 may_sleep); @@ -176,8 +177,8 @@ void nx_debugfs_fini(struct nx_crypto_driver *); extern struct crypto_alg nx_cbc_aes_alg; extern struct crypto_alg nx_ecb_aes_alg; -extern struct crypto_alg nx_gcm_aes_alg; -extern struct crypto_alg nx_gcm4106_aes_alg; +extern struct aead_alg nx_gcm_aes_alg; +extern struct aead_alg nx_gcm4106_aes_alg; extern struct crypto_alg nx_ctr_aes_alg; extern struct crypto_alg nx_ctr3686_aes_alg; extern struct crypto_alg nx_ccm_aes_alg; -- cgit v1.2.3 From 6c94711cbdf595766bc3295e437c3579943cd846 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 16 Jun 2015 13:54:22 +0800 Subject: crypto: caam - Handle errors in dma_map_sg_chained Currently dma_map_sg_chained does not handle errors from the underlying dma_map_sg calls. This patch adds rollback in case of an error by simply calling dma_unmap_sg_chained for the ones that we've already mapped. All current callers ignore the return value so this should have no impact on them. 
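A short hedged sketch of what a caller that does check the (so far ignored) return value could look like; the function is hypothetical, but the two helpers are the ones defined in the hunk below. A zero count means the helper already unmapped whatever it had partially mapped, so the caller only has to bail out.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include "sg_sw_sec4.h"	/* dma_map_sg_chained() / dma_unmap_sg_chained() */

static int example_map_src(struct device *dev, struct scatterlist *src,
			   unsigned int nents, bool chained)
{
	int mapped = dma_map_sg_chained(dev, src, nents, DMA_TO_DEVICE,
					chained);

	if (!mapped)
		return -ENOMEM;	/* helper already rolled back the partial map */

	/* ... build the job descriptor against the mapped entries ... */

	dma_unmap_sg_chained(dev, src, mapped, DMA_TO_DEVICE, chained);
	return 0;
}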
Signed-off-by: Herbert Xu --- drivers/crypto/caam/sg_sw_sec4.h | 35 +++++++++++++++++++++-------------- 1 file changed, 21 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h index efbc1db8da53..b68b74cc7b77 100644 --- a/drivers/crypto/caam/sg_sw_sec4.h +++ b/drivers/crypto/caam/sg_sw_sec4.h @@ -100,34 +100,41 @@ static inline int sg_count(struct scatterlist *sg_list, int nbytes, return sg_nents; } -static int dma_map_sg_chained(struct device *dev, struct scatterlist *sg, - unsigned int nents, enum dma_data_direction dir, - bool chained) +static inline void dma_unmap_sg_chained( + struct device *dev, struct scatterlist *sg, unsigned int nents, + enum dma_data_direction dir, bool chained) { if (unlikely(chained)) { int i; for (i = 0; i < nents; i++) { - dma_map_sg(dev, sg, 1, dir); + dma_unmap_sg(dev, sg, 1, dir); sg = sg_next(sg); } - } else { - dma_map_sg(dev, sg, nents, dir); + } else if (nents) { + dma_unmap_sg(dev, sg, nents, dir); } - return nents; } -static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg, - unsigned int nents, enum dma_data_direction dir, - bool chained) +static inline int dma_map_sg_chained( + struct device *dev, struct scatterlist *sg, unsigned int nents, + enum dma_data_direction dir, bool chained) { + struct scatterlist *first = sg; + if (unlikely(chained)) { int i; for (i = 0; i < nents; i++) { - dma_unmap_sg(dev, sg, 1, dir); + if (!dma_map_sg(dev, sg, 1, dir)) { + dma_unmap_sg_chained(dev, first, i, dir, + chained); + nents = 0; + break; + } + sg = sg_next(sg); } - } else { - dma_unmap_sg(dev, sg, nents, dir); - } + } else + nents = dma_map_sg(dev, sg, nents, dir); + return nents; } -- cgit v1.2.3 From f2147b88b2b17d4c04738c75cc5a1d0dea60fa76 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 16 Jun 2015 13:54:23 +0800 Subject: crypto: caam - Convert GCM to new AEAD interface This patch converts the caam GCM implementations to the new AEAD interface. This is compile-tested only. Note that all IV generation for GCM algorithms have been removed. The reason is that the current generation uses purely random IVs which is not appropriate for counter-based algorithms where we first and foremost require uniqueness. Of course there is no reason why you couldn't implement seqiv or seqniv within caam since all they do is xor the sequence number with a salt, but since I can't test this on actual hardware I'll leave it alone for now. 
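A tiny standalone illustration (not the caam or seqiv code) of the IV construction mentioned above: XORing a fixed salt with the packet's sequence number yields a per-request IV that is guaranteed unique, which is what a counter-based mode needs, as opposed to a purely random value.

#include <stdint.h>

/* seqiv-style IV: iv = salt ^ big-endian(sequence number) */
static void seqiv_style_iv(uint8_t iv[8], const uint8_t salt[8], uint64_t seqno)
{
	int i;

	for (i = 7; i >= 0; i--) {
		iv[i] = (uint8_t)(seqno & 0xff) ^ salt[i];
		seqno >>= 8;
	}
}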
Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 1430 +++++++++++++++++++++-------------------- 1 file changed, 741 insertions(+), 689 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 3c37fe63e598..f206521d7525 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -65,6 +65,10 @@ /* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ #define CAAM_MAX_IV_LENGTH 16 +#define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2) +#define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \ + CAAM_CMD_SZ * 4) + /* length of descriptors text */ #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ) @@ -79,18 +83,16 @@ #define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ) #define DESC_GCM_BASE (3 * CAAM_CMD_SZ) -#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 23 * CAAM_CMD_SZ) -#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 19 * CAAM_CMD_SZ) +#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ) +#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ) #define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ) -#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 15 * CAAM_CMD_SZ) -#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 14 * CAAM_CMD_SZ) -#define DESC_RFC4106_GIVENC_LEN (DESC_RFC4106_BASE + 21 * CAAM_CMD_SZ) +#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 10 * CAAM_CMD_SZ) +#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 10 * CAAM_CMD_SZ) #define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ) -#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 25 * CAAM_CMD_SZ) -#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 27 * CAAM_CMD_SZ) -#define DESC_RFC4543_GIVENC_LEN (DESC_RFC4543_BASE + 30 * CAAM_CMD_SZ) +#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ) +#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ) #define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ) #define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \ @@ -98,9 +100,7 @@ #define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \ 15 * CAAM_CMD_SZ) -#define DESC_MAX_USED_BYTES (DESC_RFC4543_GIVENC_LEN + \ - CAAM_MAX_KEY_SIZE) -#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) +#define DESC_MAX_USED_LEN (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) #ifdef DEBUG /* for print_hex_dumps with line references */ @@ -273,7 +273,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX) keys_fit_inline = true; - /* aead_encrypt shared descriptor */ + /* old_aead_encrypt shared descriptor */ desc = ctx->sh_desc_enc; init_sh_desc(desc, HDR_SHARE_SERIAL); @@ -362,7 +362,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) desc = ctx->sh_desc_dec; - /* aead_decrypt shared descriptor */ + /* old_aead_decrypt shared descriptor */ init_sh_desc(desc, HDR_SHARE_SERIAL); /* Skip if already shared */ @@ -496,7 +496,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) CAAM_DESC_BYTES_MAX) keys_fit_inline = true; - /* aead_encrypt shared descriptor */ + /* old_aead_encrypt shared descriptor */ desc = ctx->sh_desc_enc; /* Note: Context registers are saved. */ @@ -565,7 +565,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) CAAM_DESC_BYTES_MAX) keys_fit_inline = true; - /* aead_decrypt shared descriptor */ + /* old_aead_decrypt shared descriptor */ desc = ctx->sh_desc_dec; /* Note: Context registers are saved. 
*/ @@ -738,7 +738,6 @@ static int aead_setauthsize(struct crypto_aead *authenc, static int gcm_set_sh_desc(struct crypto_aead *aead) { - unsigned int ivsize = crypto_aead_ivsize(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; bool keys_fit_inline = false; @@ -754,7 +753,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) * Job Descriptor and Shared Descriptor * must fit into the 64-word Descriptor h/w Buffer */ - if (DESC_GCM_ENC_LEN + DESC_JOB_IO_LEN + + if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN + ctx->enckeylen <= CAAM_DESC_BYTES_MAX) keys_fit_inline = true; @@ -777,34 +776,34 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); - /* cryptlen = seqoutlen - authsize */ - append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); + /* if assoclen + cryptlen is ZERO, skip to ICV write */ + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL | + JUMP_COND_MATH_Z); - /* assoclen + cryptlen = seqinlen - ivsize */ - append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, ivsize); + /* if assoclen is ZERO, skip reading the assoc data */ + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | + JUMP_COND_MATH_Z); - /* assoclen = (assoclen + cryptlen) - cryptlen */ - append_math_sub(desc, REG1, REG2, REG3, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + + /* skip assoc data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); + + /* cryptlen = seqinlen - assoclen */ + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ); /* if cryptlen is ZERO jump to zero-payload commands */ - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_MATH_Z); - /* read IV */ - append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | - FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); - - /* if assoclen is ZERO, skip reading the assoc data */ - append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ); - zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | - JUMP_COND_MATH_Z); /* read assoc data */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); set_jump_tgt_here(desc, zero_assoc_jump_cmd1); - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); /* write encrypted data */ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); @@ -814,31 +813,17 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); /* jump the zero-payload commands */ - append_jump(desc, JUMP_TEST_ALL | 7); + append_jump(desc, JUMP_TEST_ALL | 2); /* zero-payload commands */ set_jump_tgt_here(desc, zero_payload_jump_cmd); - /* if assoclen is ZERO, jump to IV reading - is the only input data */ - append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ); - zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL | - JUMP_COND_MATH_Z); - /* read IV */ - append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | - FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); - /* read assoc data */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1); - /* jump to ICV writing */ - append_jump(desc, JUMP_TEST_ALL | 2); - - /* read IV - is the 
only input data */ + /* There is no input data */ set_jump_tgt_here(desc, zero_assoc_jump_cmd2); - append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | - FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | - FIFOLD_TYPE_LAST1); /* write ICV */ append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | @@ -862,7 +847,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) * must all fit into the 64-word Descriptor h/w Buffer */ keys_fit_inline = false; - if (DESC_GCM_DEC_LEN + DESC_JOB_IO_LEN + + if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN + ctx->enckeylen <= CAAM_DESC_BYTES_MAX) keys_fit_inline = true; @@ -886,33 +871,30 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); - /* assoclen + cryptlen = seqinlen - ivsize - icvsize */ - append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, - ctx->authsize + ivsize); - - /* assoclen = (assoclen + cryptlen) - cryptlen */ - append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); - append_math_sub(desc, REG1, REG3, REG2, CAAM_CMD_SZ); + /* if assoclen is ZERO, skip reading the assoc data */ + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | + JUMP_COND_MATH_Z); - /* read IV */ - append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | - FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); - /* jump to zero-payload command if cryptlen is zero */ - append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); - zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | - JUMP_COND_MATH_Z); + /* skip assoc data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); - append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ); - /* if asoclen is ZERO, skip reading assoc data */ - zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | - JUMP_COND_MATH_Z); /* read assoc data */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); + set_jump_tgt_here(desc, zero_assoc_jump_cmd1); - append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); + /* cryptlen = seqoutlen - assoclen */ + append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); + + /* jump to zero-payload command if cryptlen is zero */ + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | + JUMP_COND_MATH_Z); + + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); /* store encrypted data */ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); @@ -921,21 +903,9 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); - /* jump the zero-payload commands */ - append_jump(desc, JUMP_TEST_ALL | 4); - /* zero-payload command */ set_jump_tgt_here(desc, zero_payload_jump_cmd); - /* if assoclen is ZERO, jump to ICV reading */ - append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ); - zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL | - JUMP_COND_MATH_Z); - /* read assoc data */ - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); - set_jump_tgt_here(desc, zero_assoc_jump_cmd2); - /* read ICV */ append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); @@ -968,13 +938,11 @@ static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) static int 
rfc4106_set_sh_desc(struct crypto_aead *aead) { - unsigned int ivsize = crypto_aead_ivsize(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; bool keys_fit_inline = false; - u32 *key_jump_cmd, *move_cmd, *write_iv_cmd; + u32 *key_jump_cmd; u32 *desc; - u32 geniv; if (!ctx->enckeylen || !ctx->authsize) return 0; @@ -984,7 +952,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) * Job Descriptor and Shared Descriptor * must fit into the 64-word Descriptor h/w Buffer */ - if (DESC_RFC4106_ENC_LEN + DESC_JOB_IO_LEN + + if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN + ctx->enckeylen <= CAAM_DESC_BYTES_MAX) keys_fit_inline = true; @@ -1007,29 +975,21 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); - /* cryptlen = seqoutlen - authsize */ - append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); - /* assoclen + cryptlen = seqinlen - ivsize */ - append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, ivsize); - - /* assoclen = (assoclen + cryptlen) - cryptlen */ - append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ); - - /* Read Salt */ - append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen), - 4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV); - /* Read AES-GCM-ESP IV */ - append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | - FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); + /* Skip assoc data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); /* Read assoc data */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); + /* cryptlen = seqoutlen - assoclen */ + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + /* Will read cryptlen bytes */ - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); /* Write encrypted data */ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); @@ -1083,30 +1043,21 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); - /* assoclen + cryptlen = seqinlen - ivsize - icvsize */ - append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, - ctx->authsize + ivsize); - - /* assoclen = (assoclen + cryptlen) - cryptlen */ - append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); - append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); - - /* Will write cryptlen bytes */ - append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); - /* Read Salt */ - append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen), - 4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV); - /* Read AES-GCM-ESP IV */ - append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | - FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); + /* Skip assoc data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); /* Read assoc data */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); + /* Will write cryptlen bytes */ + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); + /* Will read cryptlen bytes */ - append_math_add(desc, VARSEQINLEN, ZERO, REG2, 
CAAM_CMD_SZ); + append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); /* Store payload data */ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); @@ -1132,107 +1083,6 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) desc_bytes(desc), 1); #endif - /* - * Job Descriptor and Shared Descriptors - * must all fit into the 64-word Descriptor h/w Buffer - */ - keys_fit_inline = false; - if (DESC_RFC4106_GIVENC_LEN + DESC_JOB_IO_LEN + - ctx->split_key_pad_len + ctx->enckeylen <= - CAAM_DESC_BYTES_MAX) - keys_fit_inline = true; - - /* rfc4106_givencrypt shared descriptor */ - desc = ctx->sh_desc_givenc; - - init_sh_desc(desc, HDR_SHARE_SERIAL); - - /* Skip key loading if it is loaded due to sharing */ - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | - JUMP_COND_SHRD); - if (keys_fit_inline) - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); - else - append_key(desc, ctx->key_dma, ctx->enckeylen, - CLASS_1 | KEY_DEST_CLASS_REG); - set_jump_tgt_here(desc, key_jump_cmd); - - /* Generate IV */ - geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | - NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | - NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT); - append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | - LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); - move_cmd = append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_DESCBUF | - (ivsize << MOVE_LEN_SHIFT)); - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); - - /* Copy generated IV to OFIFO */ - write_iv_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_OUTFIFO | - (ivsize << MOVE_LEN_SHIFT)); - - /* Class 1 operation */ - append_operation(desc, ctx->class1_alg_type | - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); - - /* ivsize + cryptlen = seqoutlen - authsize */ - append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); - - /* assoclen = seqinlen - (ivsize + cryptlen) */ - append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); - - /* Will write ivsize + cryptlen */ - append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ); - - /* Read Salt and generated IV */ - append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV | - FIFOLD_TYPE_FLUSH1 | IMMEDIATE | 12); - /* Append Salt */ - append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4); - set_move_tgt_here(desc, move_cmd); - set_move_tgt_here(desc, write_iv_cmd); - /* Blank commands. Will be overwritten by generated IV. 
*/ - append_cmd(desc, 0x00000000); - append_cmd(desc, 0x00000000); - /* End of blank commands */ - - /* No need to reload iv */ - append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP); - - /* Read assoc data */ - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); - - /* Will read cryptlen */ - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); - - /* Store generated IV and encrypted data */ - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); - - /* Read payload data */ - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | - FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); - - /* Write ICV */ - append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | - LDST_SRCDST_BYTE_CONTEXT); - - ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc, - desc_bytes(desc), - DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) { - dev_err(jrdev, "unable to map shared descriptor\n"); - return -ENOMEM; - } -#ifdef DEBUG - print_hex_dump(KERN_ERR, - "rfc4106 givenc shdesc@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, desc, - desc_bytes(desc), 1); -#endif - return 0; } @@ -1249,14 +1099,12 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc, static int rfc4543_set_sh_desc(struct crypto_aead *aead) { - unsigned int ivsize = crypto_aead_ivsize(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; bool keys_fit_inline = false; - u32 *key_jump_cmd, *write_iv_cmd, *write_aad_cmd; + u32 *key_jump_cmd; u32 *read_move_cmd, *write_move_cmd; u32 *desc; - u32 geniv; if (!ctx->enckeylen || !ctx->authsize) return 0; @@ -1266,7 +1114,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) * Job Descriptor and Shared Descriptor * must fit into the 64-word Descriptor h/w Buffer */ - if (DESC_RFC4543_ENC_LEN + DESC_JOB_IO_LEN + + if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN + ctx->enckeylen <= CAAM_DESC_BYTES_MAX) keys_fit_inline = true; @@ -1289,48 +1137,8 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); - /* Load AES-GMAC ESP IV into Math1 register */ - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 | - LDST_CLASS_DECO | ivsize); - - /* Wait the DMA transaction to finish */ - append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | - (1 << JUMP_OFFSET_SHIFT)); - - /* Overwrite blank immediate AES-GMAC ESP IV data */ - write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | - (ivsize << MOVE_LEN_SHIFT)); - - /* Overwrite blank immediate AAD data */ - write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | - (ivsize << MOVE_LEN_SHIFT)); - - /* cryptlen = seqoutlen - authsize */ - append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); - - /* assoclen = (seqinlen - ivsize) - cryptlen */ - append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); - - /* Read Salt and AES-GMAC ESP IV */ - append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | - FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + ivsize)); - /* Append Salt */ - append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4); - set_move_tgt_here(desc, write_iv_cmd); - /* Blank commands. Will be overwritten by AES-GMAC ESP IV. 
*/ - append_cmd(desc, 0x00000000); - append_cmd(desc, 0x00000000); - /* End of blank commands */ - - /* Read assoc data */ - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | - FIFOLD_TYPE_AAD); - - /* Will read cryptlen bytes */ - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); - - /* Will write cryptlen bytes */ - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + /* assoclen + cryptlen = seqinlen */ + append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ); /* * MOVE_LEN opcode is not available in all SEC HW revisions, @@ -1342,16 +1150,13 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | (0x8 << MOVE_LEN_SHIFT)); - /* Authenticate AES-GMAC ESP IV */ - append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | - FIFOLD_TYPE_AAD | ivsize); - set_move_tgt_here(desc, write_aad_cmd); - /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */ - append_cmd(desc, 0x00000000); - append_cmd(desc, 0x00000000); - /* End of blank commands */ + /* Will read assoclen + cryptlen bytes */ + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); - /* Read and write cryptlen bytes */ + /* Will write assoclen + cryptlen bytes */ + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + + /* Read and write assoclen + cryptlen bytes */ aead_append_src_dst(desc, FIFOLD_TYPE_AAD); set_move_tgt_here(desc, read_move_cmd); @@ -1382,7 +1187,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) * must all fit into the 64-word Descriptor h/w Buffer */ keys_fit_inline = false; - if (DESC_RFC4543_DEC_LEN + DESC_JOB_IO_LEN + + if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN + ctx->enckeylen <= CAAM_DESC_BYTES_MAX) keys_fit_inline = true; @@ -1405,28 +1210,8 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); - /* Load AES-GMAC ESP IV into Math1 register */ - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 | - LDST_CLASS_DECO | ivsize); - - /* Wait the DMA transaction to finish */ - append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | - (1 << JUMP_OFFSET_SHIFT)); - - /* assoclen + cryptlen = (seqinlen - ivsize) - icvsize */ - append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, ctx->authsize); - - /* Overwrite blank immediate AES-GMAC ESP IV data */ - write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | - (ivsize << MOVE_LEN_SHIFT)); - - /* Overwrite blank immediate AAD data */ - write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | - (ivsize << MOVE_LEN_SHIFT)); - - /* assoclen = (assoclen + cryptlen) - cryptlen */ - append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); - append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); + /* assoclen + cryptlen = seqoutlen */ + append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ); /* * MOVE_LEN opcode is not available in all SEC HW revisions, @@ -1438,40 +1223,16 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | (0x8 << MOVE_LEN_SHIFT)); - /* Read Salt and AES-GMAC ESP IV */ - append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | - FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + ivsize)); - /* Append Salt */ - append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4); - set_move_tgt_here(desc, write_iv_cmd); - /* Blank commands. 
Will be overwritten by AES-GMAC ESP IV. */ - append_cmd(desc, 0x00000000); - append_cmd(desc, 0x00000000); - /* End of blank commands */ - - /* Read assoc data */ - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | - FIFOLD_TYPE_AAD); - - /* Will read cryptlen bytes */ - append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); - - /* Will write cryptlen bytes */ - append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); + /* Will read assoclen + cryptlen bytes */ + append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); - /* Authenticate AES-GMAC ESP IV */ - append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | - FIFOLD_TYPE_AAD | ivsize); - set_move_tgt_here(desc, write_aad_cmd); - /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */ - append_cmd(desc, 0x00000000); - append_cmd(desc, 0x00000000); - /* End of blank commands */ + /* Will write assoclen + cryptlen bytes */ + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); /* Store payload data */ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); - /* In-snoop cryptlen data */ + /* In-snoop assoclen + cryptlen data */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF | FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1); @@ -1499,148 +1260,19 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) desc_bytes(desc), 1); #endif - /* - * Job Descriptor and Shared Descriptors - * must all fit into the 64-word Descriptor h/w Buffer - */ - keys_fit_inline = false; - if (DESC_RFC4543_GIVENC_LEN + DESC_JOB_IO_LEN + - ctx->enckeylen <= CAAM_DESC_BYTES_MAX) - keys_fit_inline = true; - - /* rfc4543_givencrypt shared descriptor */ - desc = ctx->sh_desc_givenc; - - init_sh_desc(desc, HDR_SHARE_SERIAL); - - /* Skip key loading if it is loaded due to sharing */ - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | - JUMP_COND_SHRD); - if (keys_fit_inline) - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); - else - append_key(desc, ctx->key_dma, ctx->enckeylen, - CLASS_1 | KEY_DEST_CLASS_REG); - set_jump_tgt_here(desc, key_jump_cmd); - - /* Generate IV */ - geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | - NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | - NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT); - append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | - LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); - /* Move generated IV to Math1 register */ - append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_MATH1 | - (ivsize << MOVE_LEN_SHIFT)); - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); + return 0; +} - /* Overwrite blank immediate AES-GMAC IV data */ - write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | - (ivsize << MOVE_LEN_SHIFT)); +static int rfc4543_setauthsize(struct crypto_aead *authenc, + unsigned int authsize) +{ + struct caam_ctx *ctx = crypto_aead_ctx(authenc); - /* Overwrite blank immediate AAD data */ - write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | - (ivsize << MOVE_LEN_SHIFT)); + ctx->authsize = authsize; + rfc4543_set_sh_desc(authenc); - /* Copy generated IV to OFIFO */ - append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_OUTFIFO | - (ivsize << MOVE_LEN_SHIFT)); - - /* Class 1 operation */ - append_operation(desc, ctx->class1_alg_type | - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); - - /* ivsize + cryptlen = seqoutlen - authsize */ - append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, 
ctx->authsize); - - /* assoclen = seqinlen - (ivsize + cryptlen) */ - append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); - - /* Will write ivsize + cryptlen */ - append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ); - - /* - * MOVE_LEN opcode is not available in all SEC HW revisions, - * thus need to do some magic, i.e. self-patch the descriptor - * buffer. - */ - read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 | - (0x6 << MOVE_LEN_SHIFT)); - write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | - (0x8 << MOVE_LEN_SHIFT)); - - /* Read Salt and AES-GMAC generated IV */ - append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | - FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + ivsize)); - /* Append Salt */ - append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4); - set_move_tgt_here(desc, write_iv_cmd); - /* Blank commands. Will be overwritten by AES-GMAC generated IV. */ - append_cmd(desc, 0x00000000); - append_cmd(desc, 0x00000000); - /* End of blank commands */ - - /* No need to reload iv */ - append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP); - - /* Read assoc data */ - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | - FIFOLD_TYPE_AAD); - - /* Will read cryptlen */ - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); - - /* Authenticate AES-GMAC IV */ - append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | - FIFOLD_TYPE_AAD | ivsize); - set_move_tgt_here(desc, write_aad_cmd); - /* Blank commands. Will be overwritten by AES-GMAC IV. */ - append_cmd(desc, 0x00000000); - append_cmd(desc, 0x00000000); - /* End of blank commands */ - - /* Read and write cryptlen bytes */ - aead_append_src_dst(desc, FIFOLD_TYPE_AAD); - - set_move_tgt_here(desc, read_move_cmd); - set_move_tgt_here(desc, write_move_cmd); - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); - /* Move payload data to OFIFO */ - append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO); - - /* Write ICV */ - append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | - LDST_SRCDST_BYTE_CONTEXT); - - ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc, - desc_bytes(desc), - DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) { - dev_err(jrdev, "unable to map shared descriptor\n"); - return -ENOMEM; - } -#ifdef DEBUG - print_hex_dump(KERN_ERR, - "rfc4543 givenc shdesc@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, desc, - desc_bytes(desc), 1); -#endif - - return 0; -} - -static int rfc4543_setauthsize(struct crypto_aead *authenc, - unsigned int authsize) -{ - struct caam_ctx *ctx = crypto_aead_ctx(authenc); - - ctx->authsize = authsize; - rfc4543_set_sh_desc(authenc); - - return 0; -} + return 0; +} static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen) @@ -2100,7 +1732,7 @@ struct aead_edesc { int sec4_sg_bytes; dma_addr_t sec4_sg_dma; struct sec4_sg_entry *sec4_sg; - u32 hw_desc[0]; + u32 hw_desc[]; }; /* @@ -2153,6 +1785,16 @@ static void caam_unmap(struct device *dev, struct scatterlist *src, static void aead_unmap(struct device *dev, struct aead_edesc *edesc, struct aead_request *req) +{ + caam_unmap(dev, req->src, req->dst, + edesc->src_nents, edesc->src_chained, edesc->dst_nents, + edesc->dst_chained, 0, 0, + edesc->sec4_sg_dma, edesc->sec4_sg_bytes); +} + +static void old_aead_unmap(struct device *dev, + struct aead_edesc *edesc, + struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); int ivsize = 
crypto_aead_ivsize(aead); @@ -2184,6 +1826,28 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, { struct aead_request *req = context; struct aead_edesc *edesc; + +#ifdef DEBUG + dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); +#endif + + edesc = container_of(desc, struct aead_edesc, hw_desc[0]); + + if (err) + caam_jr_strstatus(jrdev, err); + + aead_unmap(jrdev, edesc, req); + + kfree(edesc); + + aead_request_complete(req, err); +} + +static void old_aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, + void *context) +{ + struct aead_request *req = context; + struct aead_edesc *edesc; #ifdef DEBUG struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx(aead); @@ -2198,7 +1862,7 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, if (err) caam_jr_strstatus(jrdev, err); - aead_unmap(jrdev, edesc, req); + old_aead_unmap(jrdev, edesc, req); #ifdef DEBUG print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ", @@ -2223,6 +1887,34 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, { struct aead_request *req = context; struct aead_edesc *edesc; + +#ifdef DEBUG + dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); +#endif + + edesc = container_of(desc, struct aead_edesc, hw_desc[0]); + + if (err) + caam_jr_strstatus(jrdev, err); + + aead_unmap(jrdev, edesc, req); + + /* + * verify hw auth check passed else return -EBADMSG + */ + if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK) + err = -EBADMSG; + + kfree(edesc); + + aead_request_complete(req, err); +} + +static void old_aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, + void *context) +{ + struct aead_request *req = context; + struct aead_edesc *edesc; #ifdef DEBUG struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx(aead); @@ -2246,7 +1938,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, if (err) caam_jr_strstatus(jrdev, err); - aead_unmap(jrdev, edesc, req); + old_aead_unmap(jrdev, edesc, req); /* * verify hw auth check passed else return -EBADMSG @@ -2342,10 +2034,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, /* * Fill in aead job descriptor */ -static void init_aead_job(u32 *sh_desc, dma_addr_t ptr, - struct aead_edesc *edesc, - struct aead_request *req, - bool all_contig, bool encrypt) +static void old_init_aead_job(u32 *sh_desc, dma_addr_t ptr, + struct aead_edesc *edesc, + struct aead_request *req, + bool all_contig, bool encrypt) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx(aead); @@ -2424,6 +2116,97 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr, out_options); } +/* + * Fill in aead job descriptor + */ +static void init_aead_job(struct aead_request *req, + struct aead_edesc *edesc, + bool all_contig, bool encrypt) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + int authsize = ctx->authsize; + u32 *desc = edesc->hw_desc; + u32 out_options, in_options; + dma_addr_t dst_dma, src_dma; + int len, sec4_sg_index = 0; + dma_addr_t ptr; + u32 *sh_desc; + + sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec; + ptr = encrypt ? 
ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma; + + len = desc_len(sh_desc); + init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); + + if (all_contig) { + src_dma = sg_dma_address(req->src); + in_options = 0; + } else { + src_dma = edesc->sec4_sg_dma; + sec4_sg_index += edesc->src_nents; + in_options = LDST_SGF; + } + + append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen, + in_options); + + dst_dma = src_dma; + out_options = in_options; + + if (unlikely(req->src != req->dst)) { + if (!edesc->dst_nents) { + dst_dma = sg_dma_address(req->dst); + } else { + dst_dma = edesc->sec4_sg_dma + + sec4_sg_index * + sizeof(struct sec4_sg_entry); + out_options = LDST_SGF; + } + } + + if (encrypt) + append_seq_out_ptr(desc, dst_dma, + req->assoclen + req->cryptlen + authsize, + out_options); + else + append_seq_out_ptr(desc, dst_dma, + req->assoclen + req->cryptlen - authsize, + out_options); + + /* REG3 = assoclen */ + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen); +} + +static void init_gcm_job(struct aead_request *req, + struct aead_edesc *edesc, + bool all_contig, bool encrypt) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + unsigned int ivsize = crypto_aead_ivsize(aead); + u32 *desc = edesc->hw_desc; + bool generic_gcm = (ivsize == 12); + unsigned int last; + + init_aead_job(req, edesc, all_contig, encrypt); + + /* BUG This should not be specific to generic GCM. */ + last = 0; + if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen)) + last = FIFOLD_TYPE_LAST1; + + /* Read GCM IV */ + append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last); + /* Append Salt */ + if (!generic_gcm) + append_data(desc, ctx->key + ctx->enckeylen, 4); + /* Append IV */ + append_data(desc, req->iv, ivsize); + /* End of blank commands */ +} + /* * Fill in aead givencrypt job descriptor */ @@ -2608,9 +2391,10 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr, /* * allocate and map the aead extended descriptor */ -static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, - int desc_bytes, bool *all_contig_ptr, - bool encrypt) +static struct aead_edesc *old_aead_edesc_alloc(struct aead_request *req, + int desc_bytes, + bool *all_contig_ptr, + bool encrypt) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx(aead); @@ -2661,29 +2445,132 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, return ERR_PTR(-ENOMEM); } - if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) == - OP_ALG_ALGSEL_AES) && - ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM)) - is_gcm = true; + if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) == + OP_ALG_ALGSEL_AES) && + ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM)) + is_gcm = true; + + /* + * Check if data are contiguous. + * GCM expected input sequence: IV, AAD, text + * All other - expected input sequence: AAD, IV, text + */ + if (is_gcm) + all_contig = (!assoc_nents && + iv_dma + ivsize == sg_dma_address(req->assoc) && + !src_nents && sg_dma_address(req->assoc) + + req->assoclen == sg_dma_address(req->src)); + else + all_contig = (!assoc_nents && sg_dma_address(req->assoc) + + req->assoclen == iv_dma && !src_nents && + iv_dma + ivsize == sg_dma_address(req->src)); + if (!all_contig) { + assoc_nents = assoc_nents ? : 1; + src_nents = src_nents ? 
: 1; + sec4_sg_len = assoc_nents + 1 + src_nents; + } + + sec4_sg_len += dst_nents; + + sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); + + /* allocate space for base edesc and hw desc commands, link tables */ + edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + + sec4_sg_bytes, GFP_DMA | flags); + if (!edesc) { + dev_err(jrdev, "could not allocate extended descriptor\n"); + return ERR_PTR(-ENOMEM); + } + + edesc->assoc_nents = assoc_nents; + edesc->assoc_chained = assoc_chained; + edesc->src_nents = src_nents; + edesc->src_chained = src_chained; + edesc->dst_nents = dst_nents; + edesc->dst_chained = dst_chained; + edesc->iv_dma = iv_dma; + edesc->sec4_sg_bytes = sec4_sg_bytes; + edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + + desc_bytes; + *all_contig_ptr = all_contig; + + sec4_sg_index = 0; + if (!all_contig) { + if (!is_gcm) { + sg_to_sec4_sg_len(req->assoc, req->assoclen, + edesc->sec4_sg + sec4_sg_index); + sec4_sg_index += assoc_nents; + } + + dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, + iv_dma, ivsize, 0); + sec4_sg_index += 1; + + if (is_gcm) { + sg_to_sec4_sg_len(req->assoc, req->assoclen, + edesc->sec4_sg + sec4_sg_index); + sec4_sg_index += assoc_nents; + } + + sg_to_sec4_sg_last(req->src, + src_nents, + edesc->sec4_sg + + sec4_sg_index, 0); + sec4_sg_index += src_nents; + } + if (dst_nents) { + sg_to_sec4_sg_last(req->dst, dst_nents, + edesc->sec4_sg + sec4_sg_index, 0); + } + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + return ERR_PTR(-ENOMEM); + } + + return edesc; +} + +/* + * allocate and map the aead extended descriptor + */ +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, + int desc_bytes, bool *all_contig_ptr, + bool encrypt) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; + int src_nents, dst_nents = 0; + struct aead_edesc *edesc; + int sgc; + bool all_contig = true; + bool src_chained = false, dst_chained = false; + int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; + unsigned int authsize = ctx->authsize; + + if (unlikely(req->dst != req->src)) { + src_nents = sg_count(req->src, req->assoclen + req->cryptlen, + &src_chained); + dst_nents = sg_count(req->dst, + req->assoclen + req->cryptlen + + (encrypt ? authsize : (-authsize)), + &dst_chained); + } else { + src_nents = sg_count(req->src, + req->assoclen + req->cryptlen + + (encrypt ? authsize : 0), + &src_chained); + } - /* - * Check if data are contiguous. - * GCM expected input sequence: IV, AAD, text - * All other - expected input sequence: AAD, IV, text - */ - if (is_gcm) - all_contig = (!assoc_nents && - iv_dma + ivsize == sg_dma_address(req->assoc) && - !src_nents && sg_dma_address(req->assoc) + - req->assoclen == sg_dma_address(req->src)); - else - all_contig = (!assoc_nents && sg_dma_address(req->assoc) + - req->assoclen == iv_dma && !src_nents && - iv_dma + ivsize == sg_dma_address(req->src)); + /* Check if data are contiguous. */ + all_contig = !src_nents; if (!all_contig) { - assoc_nents = assoc_nents ? : 1; src_nents = src_nents ? 
: 1; - sec4_sg_len = assoc_nents + 1 + src_nents; + sec4_sg_len = src_nents; } sec4_sg_len += dst_nents; @@ -2691,64 +2578,78 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + + edesc = kzalloc(sizeof(struct aead_edesc) + desc_bytes + sec4_sg_bytes, GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); return ERR_PTR(-ENOMEM); } - edesc->assoc_nents = assoc_nents; - edesc->assoc_chained = assoc_chained; + if (likely(req->src == req->dst)) { + sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, + DMA_BIDIRECTIONAL, src_chained); + if (unlikely(!sgc)) { + dev_err(jrdev, "unable to map source\n"); + kfree(edesc); + return ERR_PTR(-ENOMEM); + } + } else { + sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, + DMA_TO_DEVICE, src_chained); + if (unlikely(!sgc)) { + dev_err(jrdev, "unable to map source\n"); + kfree(edesc); + return ERR_PTR(-ENOMEM); + } + + sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, + DMA_FROM_DEVICE, dst_chained); + if (unlikely(!sgc)) { + dev_err(jrdev, "unable to map destination\n"); + dma_unmap_sg_chained(jrdev, req->src, src_nents ? : 1, + DMA_TO_DEVICE, src_chained); + kfree(edesc); + return ERR_PTR(-ENOMEM); + } + } + edesc->src_nents = src_nents; edesc->src_chained = src_chained; edesc->dst_nents = dst_nents; edesc->dst_chained = dst_chained; - edesc->iv_dma = iv_dma; - edesc->sec4_sg_bytes = sec4_sg_bytes; edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + desc_bytes; *all_contig_ptr = all_contig; sec4_sg_index = 0; if (!all_contig) { - if (!is_gcm) { - sg_to_sec4_sg_len(req->assoc, req->assoclen, - edesc->sec4_sg + sec4_sg_index); - sec4_sg_index += assoc_nents; - } - - dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, - iv_dma, ivsize, 0); - sec4_sg_index += 1; - - if (is_gcm) { - sg_to_sec4_sg_len(req->assoc, req->assoclen, - edesc->sec4_sg + sec4_sg_index); - sec4_sg_index += assoc_nents; - } - - sg_to_sec4_sg_last(req->src, - src_nents, - edesc->sec4_sg + - sec4_sg_index, 0); + sg_to_sec4_sg(req->src, src_nents, + edesc->sec4_sg + sec4_sg_index, 0); sec4_sg_index += src_nents; } if (dst_nents) { sg_to_sec4_sg_last(req->dst, dst_nents, edesc->sec4_sg + sec4_sg_index, 0); } + + if (!sec4_sg_bytes) + return edesc; + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { dev_err(jrdev, "unable to map S/G table\n"); + aead_unmap(jrdev, edesc, req); + kfree(edesc); return ERR_PTR(-ENOMEM); } + edesc->sec4_sg_bytes = sec4_sg_bytes; + return edesc; } -static int aead_encrypt(struct aead_request *req) +static int gcm_encrypt(struct aead_request *req) { struct aead_edesc *edesc; struct crypto_aead *aead = crypto_aead_reqtfm(req); @@ -2759,14 +2660,12 @@ static int aead_encrypt(struct aead_request *req) int ret = 0; /* allocate extended descriptor */ - edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * - CAAM_CMD_SZ, &all_contig, true); + edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true); if (IS_ERR(edesc)) return PTR_ERR(edesc); /* Create and submit job descriptor */ - init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req, - all_contig, true); + init_gcm_job(req, edesc, all_contig, true); #ifdef DEBUG print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", 
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, @@ -2785,7 +2684,79 @@ static int aead_encrypt(struct aead_request *req) return ret; } -static int aead_decrypt(struct aead_request *req) +static int old_aead_encrypt(struct aead_request *req) +{ + struct aead_edesc *edesc; + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + bool all_contig; + u32 *desc; + int ret = 0; + + /* allocate extended descriptor */ + edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN * + CAAM_CMD_SZ, &all_contig, true); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + /* Create and submit job descriptor */ + old_init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req, + all_contig, true); +#ifdef DEBUG + print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, + desc_bytes(edesc->hw_desc), 1); +#endif + + desc = edesc->hw_desc; + ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req); + if (!ret) { + ret = -EINPROGRESS; + } else { + old_aead_unmap(jrdev, edesc, req); + kfree(edesc); + } + + return ret; +} + +static int gcm_decrypt(struct aead_request *req) +{ + struct aead_edesc *edesc; + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + bool all_contig; + u32 *desc; + int ret = 0; + + /* allocate extended descriptor */ + edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + /* Create and submit job descriptor*/ + init_gcm_job(req, edesc, all_contig, false); +#ifdef DEBUG + print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, + desc_bytes(edesc->hw_desc), 1); +#endif + + desc = edesc->hw_desc; + ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); + if (!ret) { + ret = -EINPROGRESS; + } else { + aead_unmap(jrdev, edesc, req); + kfree(edesc); + } + + return ret; +} + +static int old_aead_decrypt(struct aead_request *req) { struct aead_edesc *edesc; struct crypto_aead *aead = crypto_aead_reqtfm(req); @@ -2796,8 +2767,8 @@ static int aead_decrypt(struct aead_request *req) int ret = 0; /* allocate extended descriptor */ - edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * - CAAM_CMD_SZ, &all_contig, false); + edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN * + CAAM_CMD_SZ, &all_contig, false); if (IS_ERR(edesc)) return PTR_ERR(edesc); @@ -2808,8 +2779,8 @@ static int aead_decrypt(struct aead_request *req) #endif /* Create and submit job descriptor*/ - init_aead_job(ctx->sh_desc_dec, - ctx->sh_desc_dec_dma, edesc, req, all_contig, false); + old_init_aead_job(ctx->sh_desc_dec, + ctx->sh_desc_dec_dma, edesc, req, all_contig, false); #ifdef DEBUG print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, @@ -2817,11 +2788,11 @@ static int aead_decrypt(struct aead_request *req) #endif desc = edesc->hw_desc; - ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); + ret = caam_jr_enqueue(jrdev, desc, old_aead_decrypt_done, req); if (!ret) { ret = -EINPROGRESS; } else { - aead_unmap(jrdev, edesc, req); + old_aead_unmap(jrdev, edesc, req); kfree(edesc); } @@ -2995,7 +2966,7 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request return edesc; } -static int aead_givencrypt(struct aead_givcrypt_request *areq) +static int old_aead_givencrypt(struct aead_givcrypt_request *areq) { 
struct aead_request *req = &areq->areq; struct aead_edesc *edesc; @@ -3029,11 +3000,11 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq) #endif desc = edesc->hw_desc; - ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); + ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req); if (!ret) { ret = -EINPROGRESS; } else { - aead_unmap(jrdev, edesc, req); + old_aead_unmap(jrdev, edesc, req); kfree(edesc); } @@ -3042,7 +3013,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq) static int aead_null_givencrypt(struct aead_givcrypt_request *areq) { - return aead_encrypt(&areq->areq); + return old_aead_encrypt(&areq->areq); } /* @@ -3392,8 +3363,8 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, .givencrypt = aead_null_givencrypt, .geniv = "", .ivsize = NULL_IV_SIZE, @@ -3411,8 +3382,8 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, .givencrypt = aead_null_givencrypt, .geniv = "", .ivsize = NULL_IV_SIZE, @@ -3430,8 +3401,8 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, .givencrypt = aead_null_givencrypt, .geniv = "", .ivsize = NULL_IV_SIZE, @@ -3450,8 +3421,8 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, .givencrypt = aead_null_givencrypt, .geniv = "", .ivsize = NULL_IV_SIZE, @@ -3470,8 +3441,8 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, .givencrypt = aead_null_givencrypt, .geniv = "", .ivsize = NULL_IV_SIZE, @@ -3490,8 +3461,8 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, .givencrypt = aead_null_givencrypt, .geniv = "", .ivsize = NULL_IV_SIZE, @@ -3510,9 +3481,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "", .ivsize = AES_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, @@ -3529,9 +3500,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "", .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, @@ -3548,9 +3519,9 @@ static struct caam_alg_template 
driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "", .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, @@ -3568,9 +3539,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "", .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, @@ -3588,9 +3559,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "", .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, @@ -3609,9 +3580,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "", .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, @@ -3629,9 +3600,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "", .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, @@ -3648,9 +3619,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "", .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, @@ -3667,9 +3638,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "", .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, @@ -3687,9 +3658,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "", .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, @@ -3707,9 +3678,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, 
.geniv = "", .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, @@ -3727,9 +3698,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "", .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, @@ -3747,9 +3718,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "", .ivsize = DES_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, @@ -3766,9 +3737,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "", .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, @@ -3785,9 +3756,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "", .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, @@ -3805,9 +3776,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "", .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, @@ -3825,9 +3796,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "", .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, @@ -3845,9 +3816,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "", .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, @@ -3865,9 +3836,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "", .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = MD5_DIGEST_SIZE, @@ -3884,9 +3855,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = 
aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "", .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, @@ -3903,9 +3874,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "", .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, @@ -3923,9 +3894,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "", .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, @@ -3943,9 +3914,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "", .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, @@ -3963,9 +3934,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "", .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, @@ -3975,58 +3946,6 @@ static struct caam_alg_template driver_algs[] = { OP_ALG_AAI_HMAC_PRECOMP, .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, }, - { - .name = "rfc4106(gcm(aes))", - .driver_name = "rfc4106-gcm-aes-caam", - .blocksize = 1, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { - .setkey = rfc4106_setkey, - .setauthsize = rfc4106_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, - .geniv = "", - .ivsize = 8, - .maxauthsize = AES_BLOCK_SIZE, - }, - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, - }, - { - .name = "rfc4543(gcm(aes))", - .driver_name = "rfc4543-gcm-aes-caam", - .blocksize = 1, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { - .setkey = rfc4543_setkey, - .setauthsize = rfc4543_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, - .geniv = "", - .ivsize = 8, - .maxauthsize = AES_BLOCK_SIZE, - }, - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, - }, - /* Galois Counter Mode */ - { - .name = "gcm(aes)", - .driver_name = "gcm-aes-caam", - .blocksize = 1, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { - .setkey = gcm_setkey, - .setauthsize = gcm_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = NULL, - .geniv = "", - .ivsize = 12, - .maxauthsize = AES_BLOCK_SIZE, - }, - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, - }, /* ablkcipher descriptor */ { .name = "cbc(aes)", @@ -4116,21 +4035,84 @@ static struct caam_alg_template driver_algs[] = { } }; -struct caam_crypto_alg { - struct list_head entry; +struct caam_alg_entry { int class1_alg_type; int class2_alg_type; int alg_op; +}; + +struct 
caam_aead_alg { + struct aead_alg aead; + struct caam_alg_entry caam; + bool registered; +}; + +static struct caam_aead_alg driver_aeads[] = { + { + .aead = { + .base = { + .cra_name = "rfc4106(gcm(aes))", + .cra_driver_name = "rfc4106-gcm-aes-caam", + .cra_blocksize = 1, + }, + .setkey = rfc4106_setkey, + .setauthsize = rfc4106_setauthsize, + .encrypt = gcm_encrypt, + .decrypt = gcm_decrypt, + .ivsize = 8, + .maxauthsize = AES_BLOCK_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, + }, + }, + { + .aead = { + .base = { + .cra_name = "rfc4543(gcm(aes))", + .cra_driver_name = "rfc4543-gcm-aes-caam", + .cra_blocksize = 1, + }, + .setkey = rfc4543_setkey, + .setauthsize = rfc4543_setauthsize, + .encrypt = gcm_encrypt, + .decrypt = gcm_decrypt, + .ivsize = 8, + .maxauthsize = AES_BLOCK_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, + }, + }, + /* Galois Counter Mode */ + { + .aead = { + .base = { + .cra_name = "gcm(aes)", + .cra_driver_name = "gcm-aes-caam", + .cra_blocksize = 1, + }, + .setkey = gcm_setkey, + .setauthsize = gcm_setauthsize, + .encrypt = gcm_encrypt, + .decrypt = gcm_decrypt, + .ivsize = 12, + .maxauthsize = AES_BLOCK_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, + }, + }, +}; + +struct caam_crypto_alg { struct crypto_alg crypto_alg; + struct list_head entry; + struct caam_alg_entry caam; }; -static int caam_cra_init(struct crypto_tfm *tfm) +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam) { - struct crypto_alg *alg = tfm->__crt_alg; - struct caam_crypto_alg *caam_alg = - container_of(alg, struct caam_crypto_alg, crypto_alg); - struct caam_ctx *ctx = crypto_tfm_ctx(tfm); - ctx->jrdev = caam_jr_alloc(); if (IS_ERR(ctx->jrdev)) { pr_err("Job Ring Device allocation for transform failed\n"); @@ -4138,17 +4120,35 @@ static int caam_cra_init(struct crypto_tfm *tfm) } /* copy descriptor header template value */ - ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type; - ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type; - ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op; + ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; + ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; + ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op; return 0; } -static void caam_cra_exit(struct crypto_tfm *tfm) +static int caam_cra_init(struct crypto_tfm *tfm) { + struct crypto_alg *alg = tfm->__crt_alg; + struct caam_crypto_alg *caam_alg = + container_of(alg, struct caam_crypto_alg, crypto_alg); struct caam_ctx *ctx = crypto_tfm_ctx(tfm); + return caam_init_common(ctx, &caam_alg->caam); +} + +static int caam_aead_init(struct crypto_aead *tfm) +{ + struct aead_alg *alg = crypto_aead_alg(tfm); + struct caam_aead_alg *caam_alg = + container_of(alg, struct caam_aead_alg, aead); + struct caam_ctx *ctx = crypto_aead_ctx(tfm); + + return caam_init_common(ctx, &caam_alg->caam); +} + +static void caam_exit_common(struct caam_ctx *ctx) +{ if (ctx->sh_desc_enc_dma && !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma)) dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma, @@ -4171,10 +4171,28 @@ static void caam_cra_exit(struct crypto_tfm *tfm) caam_jr_free(ctx->jrdev); } +static void caam_cra_exit(struct crypto_tfm *tfm) +{ + caam_exit_common(crypto_tfm_ctx(tfm)); +} + +static void caam_aead_exit(struct crypto_aead *tfm) +{ + caam_exit_common(crypto_aead_ctx(tfm)); +} + static void __exit caam_algapi_exit(void) { struct 
caam_crypto_alg *t_alg, *n; + int i; + + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { + struct caam_aead_alg *t_alg = driver_aeads + i; + + if (t_alg->registered) + crypto_unregister_aead(&t_alg->aead); + } if (!alg_list.next) return; @@ -4227,13 +4245,26 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template break; } - t_alg->class1_alg_type = template->class1_alg_type; - t_alg->class2_alg_type = template->class2_alg_type; - t_alg->alg_op = template->alg_op; + t_alg->caam.class1_alg_type = template->class1_alg_type; + t_alg->caam.class2_alg_type = template->class2_alg_type; + t_alg->caam.alg_op = template->alg_op; return t_alg; } +static void caam_aead_alg_init(struct caam_aead_alg *t_alg) +{ + struct aead_alg *alg = &t_alg->aead; + + alg->base.cra_module = THIS_MODULE; + alg->base.cra_priority = CAAM_CRA_PRIORITY; + alg->base.cra_ctxsize = sizeof(struct caam_ctx); + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; + + alg->init = caam_aead_init; + alg->exit = caam_aead_exit; +} + static int __init caam_algapi_init(void) { struct device_node *dev_node; @@ -4241,6 +4272,7 @@ static int __init caam_algapi_init(void) struct device *ctrldev; void *priv; int i = 0, err = 0; + bool registered = false; dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); if (!dev_node) { @@ -4287,10 +4319,30 @@ static int __init caam_algapi_init(void) pr_warn("%s alg registration failed\n", t_alg->crypto_alg.cra_driver_name); kfree(t_alg); - } else - list_add_tail(&t_alg->entry, &alg_list); + continue; + } + + list_add_tail(&t_alg->entry, &alg_list); + registered = true; + } + + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { + struct caam_aead_alg *t_alg = driver_aeads + i; + + caam_aead_alg_init(t_alg); + + err = crypto_register_aead(&t_alg->aead); + if (err) { + pr_warn("%s alg registration failed\n", + t_alg->aead.base.cra_driver_name); + continue; + } + + t_alg->registered = true; + registered = true; } - if (!list_empty(&alg_list)) + + if (registered) pr_info("caam algorithms registered in /proc/crypto\n"); return err; -- cgit v1.2.3 From f657f82cc9710e2cb3067be932853ce114e5ce29 Mon Sep 17 00:00:00 2001 From: Steffen Trumtrar Date: Tue, 16 Jun 2015 12:59:07 +0200 Subject: crypto: caam - fix non-64-bit write/read access The patch crypto: caam - Add definition of rd/wr_reg64 for little endian platform added support for little endian platforms to the CAAM driver. Namely a write and read function for 64 bit registers. The only user of this functions is the Job Ring driver (drivers/crypto/caam/jr.c). It uses the functions to set the DMA addresses for the input/output rings. However, at least in the default configuration, the least significant 32 bits are always in the base+0x0004 address; independent of the endianness of the bytes itself. That means the addresses do not change with the system endianness. DMA addresses are only 32 bits wide on non-64-bit systems, writing the upper 32 bits of this value to the register for the least significant bits results in the DMA address being set to 0. Fix this by always writing the registers in the same way. 
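To make the register-pair layout concrete, a minimal user-space sketch of the write path described above follows; wr_reg32() is only a stand-in for the driver's MMIO accessor and the sample address is made up, so this illustrates the scheme rather than the driver code itself.

#include <stdint.h>
#include <stdio.h>

/* The JR DMA address registers are a pair of 32-bit registers:
 * base + 0x0000 holds the most-significant 32 bits,
 * base + 0x0004 holds the least-significant 32 bits,
 * independent of the endianness of the data written.
 */
static uint32_t fake_regs[2];

static void wr_reg32(uint32_t *reg, uint32_t data)
{
	*reg = data;	/* stands in for a 32-bit MMIO write */
}

static void wr_reg64(uint32_t *reg_pair, uint64_t data)
{
	wr_reg32(&reg_pair[0], (uint32_t)(data >> 32));	/* base + 0x0000: MS 32 bits */
	wr_reg32(&reg_pair[1], (uint32_t)data);		/* base + 0x0004: LS 32 bits */
}

int main(void)
{
	/* A 32-bit DMA address lands entirely in the LS register; writing it
	 * to the MS half instead would leave the programmed address as 0.
	 */
	wr_reg64(fake_regs, 0x12345678u);
	printf("MS=0x%08x LS=0x%08x\n", (unsigned)fake_regs[0], (unsigned)fake_regs[1]);
	return 0;
}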
Suggested-by: Russell King Signed-off-by: Steffen Trumtrar Signed-off-by: Herbert Xu --- drivers/crypto/caam/regs.h | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 378ddc17f60e..672c97489505 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -83,35 +83,35 @@ #endif #endif +/* + * The only users of these wr/rd_reg64 functions is the Job Ring (JR). + * The DMA address registers in the JR are a pair of 32-bit registers. + * The layout is: + * + * base + 0x0000 : most-significant 32 bits + * base + 0x0004 : least-significant 32 bits + * + * The 32-bit version of this core therefore has to write to base + 0x0004 + * to set the 32-bit wide DMA address. This seems to be independent of the + * endianness of the written/read data. + */ + #ifndef CONFIG_64BIT -#ifdef __BIG_ENDIAN -static inline void wr_reg64(u64 __iomem *reg, u64 data) -{ - wr_reg32((u32 __iomem *)reg, (data & 0xffffffff00000000ull) >> 32); - wr_reg32((u32 __iomem *)reg + 1, data & 0x00000000ffffffffull); -} +#define REG64_MS32(reg) ((u32 __iomem *)(reg)) +#define REG64_LS32(reg) ((u32 __iomem *)(reg) + 1) -static inline u64 rd_reg64(u64 __iomem *reg) -{ - return (((u64)rd_reg32((u32 __iomem *)reg)) << 32) | - ((u64)rd_reg32((u32 __iomem *)reg + 1)); -} -#else -#ifdef __LITTLE_ENDIAN static inline void wr_reg64(u64 __iomem *reg, u64 data) { - wr_reg32((u32 __iomem *)reg + 1, (data & 0xffffffff00000000ull) >> 32); - wr_reg32((u32 __iomem *)reg, data & 0x00000000ffffffffull); + wr_reg32(REG64_MS32(reg), data >> 32); + wr_reg32(REG64_LS32(reg), data); } static inline u64 rd_reg64(u64 __iomem *reg) { - return (((u64)rd_reg32((u32 __iomem *)reg + 1)) << 32) | - ((u64)rd_reg32((u32 __iomem *)reg)); + return ((u64)rd_reg32(REG64_MS32(reg)) << 32 | + (u64)rd_reg32(REG64_LS32(reg))); } #endif -#endif -#endif /* * jr_outentry -- cgit v1.2.3 From 596103cf8fb0a258e1a5ed3e895860764490934d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 17 Jun 2015 14:58:24 +0800 Subject: crypto: drivers - Fix Kconfig selects This patch fixes a number of problems in crypto driver Kconfig entries: 1. Select BLKCIPHER instead of BLKCIPHER2. The latter is internal and should not be used outside of the crypto API itself. 2. Do not select ALGAPI unless you use a legacy type like CRYPTO_ALG_TYPE_CIPHER. 3. Select the algorithm type that you are implementing, e.g., AEAD. 4. Do not select generic C code such as CBC/ECB unless you use them as a fallback. 5. Remove default n since that is the default default. 
Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 41 +++++++++++++---------------------------- drivers/crypto/caam/Kconfig | 5 ++--- drivers/crypto/ccp/Kconfig | 1 - drivers/crypto/nx/Kconfig | 8 -------- drivers/crypto/qat/Kconfig | 6 ++---- drivers/crypto/ux500/Kconfig | 4 ++-- 6 files changed, 19 insertions(+), 46 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 92c899fbfe27..7a72797f6f58 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -162,9 +162,8 @@ config CRYPTO_GHASH_S390 config CRYPTO_DEV_MV_CESA tristate "Marvell's Cryptographic Engine" depends on PLAT_ORION - select CRYPTO_ALGAPI select CRYPTO_AES - select CRYPTO_BLKCIPHER2 + select CRYPTO_BLKCIPHER select CRYPTO_HASH help This driver allows you to utilize the Cryptographic Engines and @@ -176,7 +175,8 @@ config CRYPTO_DEV_MV_CESA config CRYPTO_DEV_NIAGARA2 tristate "Niagara2 Stream Processing Unit driver" select CRYPTO_DES - select CRYPTO_ALGAPI + select CRYPTO_BLKCIPHER + select CRYPTO_HASH depends on SPARC64 help Each core of a Niagara2 processor contains a Stream @@ -189,7 +189,6 @@ config CRYPTO_DEV_NIAGARA2 config CRYPTO_DEV_HIFN_795X tristate "Driver HIFN 795x crypto accelerator chips" select CRYPTO_DES - select CRYPTO_ALGAPI select CRYPTO_BLKCIPHER select HW_RANDOM if CRYPTO_DEV_HIFN_795X_RNG depends on PCI @@ -208,8 +207,10 @@ source drivers/crypto/caam/Kconfig config CRYPTO_DEV_TALITOS tristate "Talitos Freescale Security Engine (SEC)" - select CRYPTO_ALGAPI + select CRYPTO_AEAD select CRYPTO_AUTHENC + select CRYPTO_BLKCIPHER + select CRYPTO_HASH select HW_RANDOM depends on FSL_SOC help @@ -244,7 +245,7 @@ config CRYPTO_DEV_IXP4XX tristate "Driver for IXP4xx crypto hardware acceleration" depends on ARCH_IXP4XX && IXP4XX_QMGR && IXP4XX_NPE select CRYPTO_DES - select CRYPTO_ALGAPI + select CRYPTO_AEAD select CRYPTO_AUTHENC select CRYPTO_BLKCIPHER help @@ -254,7 +255,6 @@ config CRYPTO_DEV_PPC4XX tristate "Driver AMCC PPC4xx crypto accelerator" depends on PPC && 4xx select CRYPTO_HASH - select CRYPTO_ALGAPI select CRYPTO_BLKCIPHER help This option allows you to have support for AMCC crypto acceleration. @@ -275,7 +275,7 @@ config CRYPTO_DEV_OMAP_AES tristate "Support for OMAP AES hw engine" depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP2PLUS select CRYPTO_AES - select CRYPTO_BLKCIPHER2 + select CRYPTO_BLKCIPHER help OMAP processors have AES module accelerator. Select this if you want to use the OMAP module for AES algorithms. @@ -284,7 +284,7 @@ config CRYPTO_DEV_OMAP_DES tristate "Support for OMAP DES3DES hw engine" depends on ARCH_OMAP2PLUS select CRYPTO_DES - select CRYPTO_BLKCIPHER2 + select CRYPTO_BLKCIPHER help OMAP processors have DES/3DES module accelerator. Select this if you want to use the OMAP module for DES and 3DES algorithms. Currently @@ -294,9 +294,10 @@ config CRYPTO_DEV_OMAP_DES config CRYPTO_DEV_PICOXCELL tristate "Support for picoXcell IPSEC and Layer2 crypto engines" depends on ARCH_PICOXCELL && HAVE_CLK + select CRYPTO_AEAD select CRYPTO_AES select CRYPTO_AUTHENC - select CRYPTO_ALGAPI + select CRYPTO_BLKCIPHER select CRYPTO_DES select CRYPTO_CBC select CRYPTO_ECB @@ -322,7 +323,6 @@ config CRYPTO_DEV_S5P tristate "Support for Samsung S5PV210/Exynos crypto accelerator" depends on ARCH_S5PV210 || ARCH_EXYNOS select CRYPTO_AES - select CRYPTO_ALGAPI select CRYPTO_BLKCIPHER help This option allows you to have support for S5P crypto acceleration. 
@@ -345,7 +345,6 @@ endif config CRYPTO_DEV_UX500 tristate "Driver for ST-Ericsson UX500 crypto hardware acceleration" depends on ARCH_U8500 - select CRYPTO_ALGAPI help Driver for ST-Ericsson UX500 crypto engine. @@ -363,10 +362,7 @@ config CRYPTO_DEV_BFIN_CRC config CRYPTO_DEV_ATMEL_AES tristate "Support for Atmel AES hw accelerator" depends on ARCH_AT91 - select CRYPTO_CBC - select CRYPTO_ECB select CRYPTO_AES - select CRYPTO_ALGAPI select CRYPTO_BLKCIPHER select AT_HDMAC help @@ -381,9 +377,6 @@ config CRYPTO_DEV_ATMEL_TDES tristate "Support for Atmel DES/TDES hw accelerator" depends on ARCH_AT91 select CRYPTO_DES - select CRYPTO_CBC - select CRYPTO_ECB - select CRYPTO_ALGAPI select CRYPTO_BLKCIPHER help Some Atmel processors have DES/TDES hw accelerator. @@ -396,10 +389,7 @@ config CRYPTO_DEV_ATMEL_TDES config CRYPTO_DEV_ATMEL_SHA tristate "Support for Atmel SHA hw accelerator" depends on ARCH_AT91 - select CRYPTO_SHA1 - select CRYPTO_SHA256 - select CRYPTO_SHA512 - select CRYPTO_ALGAPI + select CRYPTO_HASH help Some Atmel processors have SHA1/SHA224/SHA256/SHA384/SHA512 hw accelerator. @@ -412,7 +402,6 @@ config CRYPTO_DEV_ATMEL_SHA config CRYPTO_DEV_CCP bool "Support for AMD Cryptographic Coprocessor" depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM - default n help The AMD Cryptographic Coprocessor provides hardware support for encryption, hashing and related operations. @@ -424,13 +413,11 @@ endif config CRYPTO_DEV_MXS_DCP tristate "Support for Freescale MXS DCP" depends on ARCH_MXS - select CRYPTO_SHA1 - select CRYPTO_SHA256 select CRYPTO_CBC select CRYPTO_ECB select CRYPTO_AES select CRYPTO_BLKCIPHER - select CRYPTO_ALGAPI + select CRYPTO_HASH help The Freescale i.MX23/i.MX28 has SHA1/SHA256 and AES128 CBC/ECB co-processor on the die. @@ -449,7 +436,6 @@ config CRYPTO_DEV_QCE select CRYPTO_CBC select CRYPTO_XTS select CRYPTO_CTR - select CRYPTO_ALGAPI select CRYPTO_BLKCIPHER help This driver supports Qualcomm crypto engine accelerator @@ -468,7 +454,6 @@ config CRYPTO_DEV_IMGTEC_HASH tristate "Imagination Technologies hardware hash accelerator" depends on MIPS || COMPILE_TEST depends on HAS_DMA - select CRYPTO_ALGAPI select CRYPTO_MD5 select CRYPTO_SHA1 select CRYPTO_SHA256 diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index e7555ff4cafd..e286e285aa8a 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig @@ -45,7 +45,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE config CRYPTO_DEV_FSL_CAAM_INTC bool "Job Ring interrupt coalescing" depends on CRYPTO_DEV_FSL_CAAM_JR - default n help Enable the Job Ring's interrupt coalescing feature. @@ -77,8 +76,9 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API tristate "Register algorithm implementations with the Crypto API" depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR default y - select CRYPTO_ALGAPI + select CRYPTO_AEAD select CRYPTO_AUTHENC + select CRYPTO_BLKCIPHER help Selecting this will offload crypto for users of the scatterlist crypto API (such as the linux native IPSec @@ -115,7 +115,6 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API config CRYPTO_DEV_FSL_CAAM_DEBUG bool "Enable debug output in CAAM driver" depends on CRYPTO_DEV_FSL_CAAM - default n help Selecting this will enable printing of various debug information in the CAAM driver. 
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 7639ffc36c68..ae38f6b6cc10 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -13,7 +13,6 @@ config CRYPTO_DEV_CCP_CRYPTO tristate "Encryption and hashing acceleration support" depends on CRYPTO_DEV_CCP_DD default m - select CRYPTO_ALGAPI select CRYPTO_HASH select CRYPTO_BLKCIPHER select CRYPTO_AUTHENC diff --git a/drivers/crypto/nx/Kconfig b/drivers/crypto/nx/Kconfig index 3e621ad09675..b1addf6223db 100644 --- a/drivers/crypto/nx/Kconfig +++ b/drivers/crypto/nx/Kconfig @@ -3,16 +3,8 @@ config CRYPTO_DEV_NX_ENCRYPT tristate "Encryption acceleration support on pSeries platform" depends on PPC_PSERIES && IBMVIO && !CPU_LITTLE_ENDIAN default y - select CRYPTO_ALGAPI select CRYPTO_AES - select CRYPTO_CBC - select CRYPTO_ECB select CRYPTO_CCM - select CRYPTO_GCM - select CRYPTO_AUTHENC - select CRYPTO_XCBC - select CRYPTO_SHA256 - select CRYPTO_SHA512 help Support for PowerPC Nest (NX) encryption acceleration. This module supports acceleration for AES and SHA2 algorithms on diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig index 49bede2a9f77..6fdb9e8b22a7 100644 --- a/drivers/crypto/qat/Kconfig +++ b/drivers/crypto/qat/Kconfig @@ -2,9 +2,8 @@ config CRYPTO_DEV_QAT tristate select CRYPTO_AEAD select CRYPTO_AUTHENC - select CRYPTO_ALGAPI - select CRYPTO_AES - select CRYPTO_CBC + select CRYPTO_BLKCIPHER + select CRYPTO_HMAC select CRYPTO_SHA1 select CRYPTO_SHA256 select CRYPTO_SHA512 @@ -13,7 +12,6 @@ config CRYPTO_DEV_QAT config CRYPTO_DEV_QAT_DH895xCC tristate "Support for Intel(R) DH895xCC" depends on X86 && PCI - default n select CRYPTO_DEV_QAT help Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig index b35e5c4b025a..30796441b0a6 100644 --- a/drivers/crypto/ux500/Kconfig +++ b/drivers/crypto/ux500/Kconfig @@ -7,6 +7,8 @@ config CRYPTO_DEV_UX500_CRYP tristate "UX500 crypto driver for CRYP block" depends on CRYPTO_DEV_UX500 + select CRYPTO_ALGAPI + select CRYPTO_BLKCIPHER select CRYPTO_DES help This selects the crypto driver for the UX500_CRYP hardware. It supports @@ -16,7 +18,6 @@ config CRYPTO_DEV_UX500_HASH tristate "UX500 crypto driver for HASH block" depends on CRYPTO_DEV_UX500 select CRYPTO_HASH - select CRYPTO_HMAC help This selects the hash driver for the UX500_HASH hardware. Depends on UX500/STM DMA if running in DMA mode. @@ -24,7 +25,6 @@ config CRYPTO_DEV_UX500_HASH config CRYPTO_DEV_UX500_DEBUG bool "Activate ux500 platform debug-mode for crypto and hash block" depends on CRYPTO_DEV_UX500_CRYP || CRYPTO_DEV_UX500_HASH - default n help Say Y if you want to add debug prints to ux500_hash and ux500_cryp devices. -- cgit v1.2.3 From 87e51b072485f519c6ef077f60f4b837677202cc Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 18 Jun 2015 14:25:55 +0800 Subject: crypto: caam - Reintroduce DESC_MAX_USED_BYTES I incorrectly removed DESC_MAX_USED_BYTES when enlarging the size of the shared descriptor buffers, thus making it four times larger than what is necessary. This patch restores the division by four calculation. 
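The arithmetic behind the fix, as a stand-alone sketch: CAAM descriptors are built from 4-byte command words, so a limit expressed in words has to divide the byte budget by CAAM_CMD_SZ; dropping that division, as the earlier change did, makes the limit four times too large. The byte budget and job I/O overhead values below are assumptions for illustration, not the driver's actual constants.

#include <stdio.h>
#include <stdint.h>

#define CAAM_CMD_SZ		sizeof(uint32_t)	/* 4-byte command words */
#define CAAM_DESC_BYTES_MAX	(CAAM_CMD_SZ * 64)	/* assumed descriptor byte budget */
#define DESC_JOB_IO_LEN		(CAAM_CMD_SZ * 5)	/* assumed job I/O overhead */

#define DESC_MAX_USED_BYTES	(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN	(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)	/* bytes -> words */

int main(void)
{
	printf("max shared descriptor use: %zu bytes = %zu command words\n",
	       (size_t)DESC_MAX_USED_BYTES, (size_t)DESC_MAX_USED_LEN);
	return 0;
}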
Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index f206521d7525..789c385f2e47 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -100,7 +100,8 @@ #define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \ 15 * CAAM_CMD_SZ) -#define DESC_MAX_USED_LEN (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) +#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) +#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) #ifdef DEBUG /* for print_hex_dumps with line references */ -- cgit v1.2.3 From 7793bda8fe372fe2e71f16d0619b630d6e41c8ba Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 18 Jun 2015 14:25:56 +0800 Subject: crypto: caam - Set last bit on src SG list The new aead_edesc_alloc left out the bit indicating the last entry on the source SG list. This patch fixes it. Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 789c385f2e47..daca933a82ec 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -2624,7 +2624,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, sec4_sg_index = 0; if (!all_contig) { - sg_to_sec4_sg(req->src, src_nents, + sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg + sec4_sg_index, 0); sec4_sg_index += src_nents; } -- cgit v1.2.3 From c47d63020c03659e584673f78f24f2e5de3e6b9b Mon Sep 17 00:00:00 2001 From: Dan Streetman Date: Thu, 18 Jun 2015 12:05:30 -0400 Subject: crypto: nx - add LE support to pSeries platform driver Add support to the nx-842-pseries.c driver for running in little endian mode. The pSeries platform NX 842 driver currently only works as big endian. This adds cpu_to_be*() and be*_to_cpu() in the appropriate places to work in LE mode also. Signed-off-by: Dan Streetman Signed-off-by: Herbert Xu --- drivers/crypto/nx/Kconfig | 2 +- drivers/crypto/nx/nx-842-pseries.c | 83 +++++++++++++++++++------------------- drivers/crypto/nx/nx-842.h | 3 +- 3 files changed, 45 insertions(+), 43 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/nx/Kconfig b/drivers/crypto/nx/Kconfig index b1addf6223db..e421c96c763a 100644 --- a/drivers/crypto/nx/Kconfig +++ b/drivers/crypto/nx/Kconfig @@ -24,7 +24,7 @@ if CRYPTO_DEV_NX_COMPRESS config CRYPTO_DEV_NX_COMPRESS_PSERIES tristate "Compression acceleration support on pSeries platform" - depends on PPC_PSERIES && IBMVIO && !CPU_LITTLE_ENDIAN + depends on PPC_PSERIES && IBMVIO default y help Support for PowerPC Nest (NX) compression acceleration. 
This diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c index da52d8edefb3..3040a6091bf2 100644 --- a/drivers/crypto/nx/nx-842-pseries.c +++ b/drivers/crypto/nx/nx-842-pseries.c @@ -166,8 +166,8 @@ static unsigned long nx842_get_desired_dma(struct vio_dev *viodev) } struct nx842_slentry { - unsigned long ptr; /* Real address (use __pa()) */ - unsigned long len; + __be64 ptr; /* Real address (use __pa()) */ + __be64 len; }; /* pHyp scatterlist entry */ @@ -186,30 +186,21 @@ static inline unsigned long nx842_get_scatterlist_size( static int nx842_build_scatterlist(unsigned long buf, int len, struct nx842_scatterlist *sl) { - unsigned long nextpage; + unsigned long entrylen; struct nx842_slentry *entry; sl->entry_nr = 0; entry = sl->entries; while (len) { - entry->ptr = nx842_get_pa((void *)buf); - nextpage = ALIGN(buf + 1, NX842_HW_PAGE_SIZE); - if (nextpage < buf + len) { - /* we aren't at the end yet */ - if (IS_ALIGNED(buf, NX842_HW_PAGE_SIZE)) - /* we are in the middle (or beginning) */ - entry->len = NX842_HW_PAGE_SIZE; - else - /* we are at the beginning */ - entry->len = nextpage - buf; - } else { - /* at the end */ - entry->len = len; - } - - len -= entry->len; - buf += entry->len; + entry->ptr = cpu_to_be64(nx842_get_pa((void *)buf)); + entrylen = min_t(int, len, + LEN_ON_SIZE(buf, NX842_HW_PAGE_SIZE)); + entry->len = cpu_to_be64(entrylen); + + len -= entrylen; + buf += entrylen; + sl->entry_nr++; entry++; } @@ -230,8 +221,8 @@ static int nx842_validate_result(struct device *dev, csb->completion_code, csb->completion_extension); dev_dbg(dev, "processed_bytes:%d address:0x%016lx\n", - csb->processed_byte_count, - (unsigned long)csb->address); + be32_to_cpu(csb->processed_byte_count), + (unsigned long)be64_to_cpu(csb->address)); return -EIO; } @@ -338,7 +329,6 @@ static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen, csbcpb = &workmem->csbcpb; memset(csbcpb, 0, sizeof(*csbcpb)); op.csbcpb = nx842_get_pa(csbcpb); - op.out = nx842_get_pa(slout.entries); if ((inbuf & NX842_HW_PAGE_MASK) == ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) { @@ -364,6 +354,10 @@ static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen, op.outlen = -nx842_get_scatterlist_size(&slout); } + dev_dbg(dev, "%s: op.in %lx op.inlen %ld op.out %lx op.outlen %ld\n", + __func__, (unsigned long)op.in, (long)op.inlen, + (unsigned long)op.out, (long)op.outlen); + /* Send request to pHyp */ ret = vio_h_cop_sync(local_devdata->vdev, &op); @@ -380,7 +374,7 @@ static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen, if (ret) goto unlock; - *outlen = csbcpb->csb.processed_byte_count; + *outlen = be32_to_cpu(csbcpb->csb.processed_byte_count); dev_dbg(dev, "%s: processed_bytes=%d\n", __func__, *outlen); unlock: @@ -493,6 +487,10 @@ static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen, op.outlen = -nx842_get_scatterlist_size(&slout); } + dev_dbg(dev, "%s: op.in %lx op.inlen %ld op.out %lx op.outlen %ld\n", + __func__, (unsigned long)op.in, (long)op.inlen, + (unsigned long)op.out, (long)op.outlen); + /* Send request to pHyp */ ret = vio_h_cop_sync(local_devdata->vdev, &op); @@ -508,7 +506,7 @@ static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen, if (ret) goto unlock; - *outlen = csbcpb->csb.processed_byte_count; + *outlen = be32_to_cpu(csbcpb->csb.processed_byte_count); unlock: if (ret) @@ -600,16 +598,16 @@ static int nx842_OF_upd_status(struct nx842_devdata *devdata, static int 
nx842_OF_upd_maxsglen(struct nx842_devdata *devdata, struct property *prop) { int ret = 0; - const int *maxsglen = prop->value; + const unsigned int maxsglen = of_read_number(prop->value, 1); - if (prop->length != sizeof(*maxsglen)) { + if (prop->length != sizeof(maxsglen)) { dev_err(devdata->dev, "%s: unexpected format for ibm,max-sg-len property\n", __func__); dev_dbg(devdata->dev, "%s: ibm,max-sg-len is %d bytes long, expected %lu bytes\n", __func__, - prop->length, sizeof(*maxsglen)); + prop->length, sizeof(maxsglen)); ret = -EINVAL; } else { - devdata->max_sg_len = (unsigned int)min(*maxsglen, - (int)NX842_HW_PAGE_SIZE); + devdata->max_sg_len = min_t(unsigned int, + maxsglen, NX842_HW_PAGE_SIZE); } return ret; @@ -648,13 +646,15 @@ static int nx842_OF_upd_maxsglen(struct nx842_devdata *devdata, static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata, struct property *prop) { int ret = 0; + unsigned int comp_data_limit, decomp_data_limit; + unsigned int comp_sg_limit, decomp_sg_limit; const struct maxsynccop_t { - int comp_elements; - int comp_data_limit; - int comp_sg_limit; - int decomp_elements; - int decomp_data_limit; - int decomp_sg_limit; + __be32 comp_elements; + __be32 comp_data_limit; + __be32 comp_sg_limit; + __be32 decomp_elements; + __be32 decomp_data_limit; + __be32 decomp_sg_limit; } *maxsynccop; if (prop->length != sizeof(*maxsynccop)) { @@ -666,14 +666,16 @@ static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata, } maxsynccop = (const struct maxsynccop_t *)prop->value; + comp_data_limit = be32_to_cpu(maxsynccop->comp_data_limit); + comp_sg_limit = be32_to_cpu(maxsynccop->comp_sg_limit); + decomp_data_limit = be32_to_cpu(maxsynccop->decomp_data_limit); + decomp_sg_limit = be32_to_cpu(maxsynccop->decomp_sg_limit); /* Use one limit rather than separate limits for compression and * decompression. 
Set a maximum for this so as not to exceed the * size that the header can support and round the value down to * the hardware page size (4K) */ - devdata->max_sync_size = - (unsigned int)min(maxsynccop->comp_data_limit, - maxsynccop->decomp_data_limit); + devdata->max_sync_size = min(comp_data_limit, decomp_data_limit); devdata->max_sync_size = min_t(unsigned int, devdata->max_sync_size, 65536); @@ -689,8 +691,7 @@ static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata, nx842_pseries_constraints.maximum = devdata->max_sync_size; - devdata->max_sync_sg = (unsigned int)min(maxsynccop->comp_sg_limit, - maxsynccop->decomp_sg_limit); + devdata->max_sync_sg = min(comp_sg_limit, decomp_sg_limit); if (devdata->max_sync_sg < 1) { dev_err(devdata->dev, "%s: hardware max sg size (%u) is " "less than the driver minimum, unable to use " diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h index f6821b65b7ce..ac0ea79d0f8b 100644 --- a/drivers/crypto/nx/nx-842.h +++ b/drivers/crypto/nx/nx-842.h @@ -88,7 +88,8 @@ #define CCB_CM_EXTRA_WRITE (CCB_CM0_ALL_COMPLETIONS & CCB_CM12_STORE) #define CCB_CM_INTERRUPT (CCB_CM0_ALL_COMPLETIONS & CCB_CM12_INTERRUPT) -#define LEN_ON_PAGE(pa) (PAGE_SIZE - ((pa) & ~PAGE_MASK)) +#define LEN_ON_SIZE(pa, size) ((size) - ((pa) & ((size) - 1))) +#define LEN_ON_PAGE(pa) LEN_ON_SIZE(pa, PAGE_SIZE) static inline unsigned long nx842_get_pa(void *addr) { -- cgit v1.2.3 From 51b44fc81178136bca88565dad07c067c8dc51da Mon Sep 17 00:00:00 2001 From: Boris BREZILLON Date: Thu, 18 Jun 2015 15:46:18 +0200 Subject: crypto: mv_cesa - use gen_pool to reserve the SRAM memory region The mv_cesa driver currently expects the SRAM memory region to be passed as a platform device resource. This approach implies two drawbacks: - the DT representation is wrong - the only one that can access the SRAM is the crypto engine The last point is particularly annoying in some cases: for example on armada 370, a small region of the crypto SRAM is used to implement the cpuidle, which means you would not be able to enable both cpuidle and the CESA driver. To address that problem, we explicitly define the SRAM device in the DT and then reference the sram node from the crypto engine node. Also note that the old way of retrieving the SRAM memory region is still supported, or in other words, backward compatibility is preserved. Signed-off-by: Boris Brezillon Signed-off-by: Herbert Xu --- .../devicetree/bindings/crypto/mv_cesa.txt | 24 ++++++--- drivers/crypto/Kconfig | 1 + drivers/crypto/mv_cesa.c | 58 ++++++++++++++++------ 3 files changed, 60 insertions(+), 23 deletions(-) (limited to 'drivers') diff --git a/Documentation/devicetree/bindings/crypto/mv_cesa.txt b/Documentation/devicetree/bindings/crypto/mv_cesa.txt index eaa2873e7b25..13b8fc5ddcf2 100644 --- a/Documentation/devicetree/bindings/crypto/mv_cesa.txt +++ b/Documentation/devicetree/bindings/crypto/mv_cesa.txt @@ -2,21 +2,29 @@ Marvell Cryptographic Engines And Security Accelerator Required properties: - compatible : should be "marvell,orion-crypto" -- reg : base physical address of the engine and length of memory mapped - region, followed by base physical address of sram and its memory - length -- reg-names : "regs" , "sram"; -- interrupts : interrupt number +- reg: base physical address of the engine and length of memory mapped + region. Can also contain an entry for the SRAM attached to the CESA, + but this representation is deprecated and marvell,crypto-srams should + be used instead +- reg-names: "regs". 
Can contain an "sram" entry, but this representation + is deprecated and marvell,crypto-srams should be used instead +- interrupts: interrupt number - clocks: reference to the crypto engines clocks. This property is only required for Dove platforms +- marvell,crypto-srams: phandle to crypto SRAM definitions + +Optional properties: +- marvell,crypto-sram-size: SRAM size reserved for crypto operations, if not + specified the whole SRAM is used (2KB) Examples: crypto@30000 { compatible = "marvell,orion-crypto"; - reg = <0x30000 0x10000>, - <0x4000000 0x800>; - reg-names = "regs" , "sram"; + reg = <0x30000 0x10000>; + reg-names = "regs"; interrupts = <22>; + marvell,crypto-srams = <&crypto_sram>; + marvell,crypto-sram-size = <0x600>; status = "okay"; }; diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 7a72797f6f58..31e7b7ce5990 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -165,6 +165,7 @@ config CRYPTO_DEV_MV_CESA select CRYPTO_AES select CRYPTO_BLKCIPHER select CRYPTO_HASH + select SRAM help This driver allows you to utilize the Cryptographic Engines and Security Accelerator (CESA) which can be found on the Marvell Orion diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c index eb645c2cf3eb..e31d82c1eebe 100644 --- a/drivers/crypto/mv_cesa.c +++ b/drivers/crypto/mv_cesa.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -29,6 +30,8 @@ #define MAX_HW_HASH_SIZE 0xFFFF #define MV_CESA_EXPIRE 500 /* msec */ +#define MV_CESA_DEFAULT_SRAM_SIZE 2048 + /* * STM: * /---------------------------------------\ @@ -83,6 +86,8 @@ struct req_progress { struct crypto_priv { void __iomem *reg; void __iomem *sram; + struct gen_pool *sram_pool; + dma_addr_t sram_dma; int irq; struct clk *clk; struct task_struct *queue_th; @@ -1019,6 +1024,39 @@ static struct ahash_alg mv_hmac_sha1_alg = { } }; +static int mv_cesa_get_sram(struct platform_device *pdev, + struct crypto_priv *cp) +{ + struct resource *res; + u32 sram_size = MV_CESA_DEFAULT_SRAM_SIZE; + + of_property_read_u32(pdev->dev.of_node, "marvell,crypto-sram-size", + &sram_size); + + cp->sram_size = sram_size; + cp->sram_pool = of_get_named_gen_pool(&pdev->dev.of_node, + "marvell,crypto-srams", 0); + if (cp->sram_pool) { + cp->sram = gen_pool_dma_alloc(cp->sram_pool, sram_size, + &cp->sram_dma); + if (cp->sram) + return 0; + + return -ENOMEM; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "sram"); + if (!res || resource_size(res) < cp->sram_size) + return -EINVAL; + + cp->sram = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(cp->sram)) + return PTR_ERR(cp->sram); + + return 0; +} + static int mv_probe(struct platform_device *pdev) { struct crypto_priv *cp; @@ -1047,18 +1085,11 @@ static int mv_probe(struct platform_device *pdev) goto err; } - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram"); - if (!res) { - ret = -ENXIO; + ret = mv_cesa_get_sram(pdev, cp); + if (ret) goto err; - } - cp->sram_size = resource_size(res); + cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE; - cp->sram = ioremap(res->start, cp->sram_size); - if (!cp->sram) { - ret = -ENOMEM; - goto err; - } if (pdev->dev.of_node) irq = irq_of_parse_and_map(pdev->dev.of_node, 0); @@ -1066,7 +1097,7 @@ static int mv_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0 || irq == NO_IRQ) { ret = irq; - goto err_unmap_sram; + goto err; } cp->irq = irq; @@ -1076,7 +1107,7 @@ static int mv_probe(struct platform_device *pdev) cp->queue_th = 
kthread_run(queue_manag, cp, "mv_crypto"); if (IS_ERR(cp->queue_th)) { ret = PTR_ERR(cp->queue_th); - goto err_unmap_sram; + goto err; } ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev), @@ -1134,8 +1165,6 @@ err_irq: } err_thread: kthread_stop(cp->queue_th); -err_unmap_sram: - iounmap(cp->sram); err: kfree(cp); cpg = NULL; @@ -1155,7 +1184,6 @@ static int mv_remove(struct platform_device *pdev) kthread_stop(cp->queue_th); free_irq(cp->irq, cp); memset(cp->sram, 0, cp->sram_size); - iounmap(cp->sram); if (!IS_ERR(cp->clk)) { clk_disable_unprepare(cp->clk); -- cgit v1.2.3 From 1fa2e9ae1d3782bd8f737487dc6306ba16b4d016 Mon Sep 17 00:00:00 2001 From: Boris BREZILLON Date: Thu, 18 Jun 2015 15:46:19 +0200 Subject: crypto: mv_cesa - explicitly define kirkwood and dove compatible strings We are about to add a new driver to support new features like using the TDMA engine to offload the CPU. Orion, Dove and Kirkwood platforms are already using the mv_cesa driver, but Orion SoCs do not embed the TDMA engine, which means we will have to differentiate them if we want to get TDMA support on Dove and Kirkwood. In the other hand, the migration from the old driver to the new one is not something all people are willing to do without first auditing the new driver. Hence we have to support the new compatible in the mv_cesa driver so that new platforms with updated DTs can still attach their crypto engine device to this driver. Signed-off-by: Boris Brezillon Signed-off-by: Herbert Xu --- Documentation/devicetree/bindings/crypto/mv_cesa.txt | 5 ++++- drivers/crypto/mv_cesa.c | 4 +++- 2 files changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/Documentation/devicetree/bindings/crypto/mv_cesa.txt b/Documentation/devicetree/bindings/crypto/mv_cesa.txt index 13b8fc5ddcf2..c0c35f00335b 100644 --- a/Documentation/devicetree/bindings/crypto/mv_cesa.txt +++ b/Documentation/devicetree/bindings/crypto/mv_cesa.txt @@ -1,7 +1,10 @@ Marvell Cryptographic Engines And Security Accelerator Required properties: -- compatible : should be "marvell,orion-crypto" +- compatible: should be one of the following string + "marvell,orion-crypto" + "marvell,kirkwood-crypto" + "marvell,dove-crypto" - reg: base physical address of the engine and length of memory mapped region. 
Can also contain an entry for the SRAM attached to the CESA, but this representation is deprecated and marvell,crypto-srams should diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c index e31d82c1eebe..5bcd575fa96f 100644 --- a/drivers/crypto/mv_cesa.c +++ b/drivers/crypto/mv_cesa.c @@ -1034,7 +1034,7 @@ static int mv_cesa_get_sram(struct platform_device *pdev, &sram_size); cp->sram_size = sram_size; - cp->sram_pool = of_get_named_gen_pool(&pdev->dev.of_node, + cp->sram_pool = of_get_named_gen_pool(pdev->dev.of_node, "marvell,crypto-srams", 0); if (cp->sram_pool) { cp->sram = gen_pool_dma_alloc(cp->sram_pool, sram_size, @@ -1197,6 +1197,8 @@ static int mv_remove(struct platform_device *pdev) static const struct of_device_id mv_cesa_of_match_table[] = { { .compatible = "marvell,orion-crypto", }, + { .compatible = "marvell,kirkwood-crypto", }, + { .compatible = "marvell,dove-crypto", }, {} }; MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table); -- cgit v1.2.3 From f63601fd616ab370774fa00ea10bcaaa9e48e84c Mon Sep 17 00:00:00 2001 From: Boris BREZILLON Date: Thu, 18 Jun 2015 15:46:20 +0200 Subject: crypto: marvell/cesa - add a new driver for Marvell's CESA The existing mv_cesa driver supports some features of the CESA IP but is quite limited, and reworking it to support new features (like involving the TDMA engine to offload the CPU) is almost impossible. This driver has been rewritten from scratch to take those new features into account. This commit introduce the base infrastructure allowing us to add support for DMA optimization. It also includes support for one hash (SHA1) and one cipher (AES) algorithm, and enable those features on the Armada 370 SoC. Other algorithms and platforms will be added later on. Signed-off-by: Boris Brezillon Signed-off-by: Arnaud Ebalard Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 15 + drivers/crypto/Makefile | 1 + drivers/crypto/marvell/Makefile | 2 + drivers/crypto/marvell/cesa.c | 417 +++++++++++++++++++++++ drivers/crypto/marvell/cesa.h | 554 +++++++++++++++++++++++++++++++ drivers/crypto/marvell/cipher.c | 331 ++++++++++++++++++ drivers/crypto/marvell/hash.c | 720 ++++++++++++++++++++++++++++++++++++++++ 7 files changed, 2040 insertions(+) create mode 100644 drivers/crypto/marvell/Makefile create mode 100644 drivers/crypto/marvell/cesa.c create mode 100644 drivers/crypto/marvell/cesa.h create mode 100644 drivers/crypto/marvell/cipher.c create mode 100644 drivers/crypto/marvell/hash.c (limited to 'drivers') diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 31e7b7ce5990..dbb35bbefaf3 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -173,6 +173,21 @@ config CRYPTO_DEV_MV_CESA Currently the driver supports AES in ECB and CBC mode without DMA. +config CRYPTO_DEV_MARVELL_CESA + tristate "New Marvell's Cryptographic Engine driver" + depends on (PLAT_ORION || ARCH_MVEBU || COMPILE_TEST) && HAS_DMA && HAS_IOMEM + select CRYPTO_AES + select CRYPTO_DES + select CRYPTO_BLKCIPHER + select CRYPTO_HASH + select SRAM + help + This driver allows you to utilize the Cryptographic Engines and + Security Accelerator (CESA) which can be found on the Armada 370. + + This driver is aimed at replacing the mv_cesa driver. This will only + happen once it has received proper testing. 
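
As an aside on what enabling CRYPTO_DEV_MARVELL_CESA exposes, the sketch below (not part of the patch) drives the newly registered "sha1" ahash through the generic kernel crypto API. demo_sha1() and demo_complete() are hypothetical names, error handling is shortened, and the data buffer is assumed to be linear, kmalloc'ed memory that can be mapped into a scatterlist.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>

struct demo_result {
	struct completion done;
	int err;
};

/* Callback invoked by the crypto layer once the CESA engine is done. */
static void demo_complete(struct crypto_async_request *req, int err)
{
	struct demo_result *res = req->data;

	if (err == -EINPROGRESS)
		return;		/* backlogged request just started, keep waiting */
	res->err = err;
	complete(&res->done);
}

/* Hash @len bytes at @data into @digest (at least SHA1_DIGEST_SIZE bytes). */
static int demo_sha1(const void *data, unsigned int len, u8 *digest)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	struct demo_result res;
	int ret;

	tfm = crypto_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	init_completion(&res.done);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   demo_complete, &res);
	sg_init_one(&sg, data, len);
	ahash_request_set_crypt(req, &sg, digest, len);

	/* Hardware implementations such as mv-sha1 return -EINPROGRESS here. */
	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&res.done);
		ret = res.err;
	}

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}

The same completion-based pattern applies to the "ecb(aes)" and "cbc(aes)" ablkcipher algorithms registered by this driver, since requests complete asynchronously from the CESA interrupt handler.
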
+ config CRYPTO_DEV_NIAGARA2 tristate "Niagara2 Stream Processing Unit driver" select CRYPTO_DES diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index fb84be7e6be5..e35c07a8da85 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o +obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell/ obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o n2_crypto-y := n2_core.o n2_asm.o diff --git a/drivers/crypto/marvell/Makefile b/drivers/crypto/marvell/Makefile new file mode 100644 index 000000000000..68d0982c3416 --- /dev/null +++ b/drivers/crypto/marvell/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell-cesa.o +marvell-cesa-objs := cesa.o cipher.o hash.o diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c new file mode 100644 index 000000000000..76a694303839 --- /dev/null +++ b/drivers/crypto/marvell/cesa.c @@ -0,0 +1,417 @@ +/* + * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA) + * that can be found on the following platform: Orion, Kirkwood, Armada. This + * driver supports the TDMA engine on platforms on which it is available. + * + * Author: Boris Brezillon + * Author: Arnaud Ebalard + * + * This work is based on an initial version written by + * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc > + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cesa.h" + +struct mv_cesa_dev *cesa_dev; + +static void mv_cesa_dequeue_req_unlocked(struct mv_cesa_engine *engine) +{ + struct crypto_async_request *req, *backlog; + struct mv_cesa_ctx *ctx; + + spin_lock_bh(&cesa_dev->lock); + backlog = crypto_get_backlog(&cesa_dev->queue); + req = crypto_dequeue_request(&cesa_dev->queue); + engine->req = req; + spin_unlock_bh(&cesa_dev->lock); + + if (!req) + return; + + if (backlog) + backlog->complete(backlog, -EINPROGRESS); + + ctx = crypto_tfm_ctx(req->tfm); + ctx->ops->prepare(req, engine); + ctx->ops->step(req); +} + +static irqreturn_t mv_cesa_int(int irq, void *priv) +{ + struct mv_cesa_engine *engine = priv; + struct crypto_async_request *req; + struct mv_cesa_ctx *ctx; + u32 status, mask; + irqreturn_t ret = IRQ_NONE; + + while (true) { + int res; + + mask = mv_cesa_get_int_mask(engine); + status = readl(engine->regs + CESA_SA_INT_STATUS); + + if (!(status & mask)) + break; + + /* + * TODO: avoid clearing the FPGA_INT_STATUS if this not + * relevant on some platforms. 
+ */ + writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS); + writel(~status, engine->regs + CESA_SA_INT_STATUS); + + ret = IRQ_HANDLED; + spin_lock_bh(&engine->lock); + req = engine->req; + spin_unlock_bh(&engine->lock); + if (req) { + ctx = crypto_tfm_ctx(req->tfm); + res = ctx->ops->process(req, status & mask); + if (res != -EINPROGRESS) { + spin_lock_bh(&engine->lock); + engine->req = NULL; + mv_cesa_dequeue_req_unlocked(engine); + spin_unlock_bh(&engine->lock); + ctx->ops->cleanup(req); + local_bh_disable(); + req->complete(req, res); + local_bh_enable(); + } else { + ctx->ops->step(req); + } + } + } + + return ret; +} + +int mv_cesa_queue_req(struct crypto_async_request *req) +{ + int ret; + int i; + + spin_lock_bh(&cesa_dev->lock); + ret = crypto_enqueue_request(&cesa_dev->queue, req); + spin_unlock_bh(&cesa_dev->lock); + + if (ret != -EINPROGRESS) + return ret; + + for (i = 0; i < cesa_dev->caps->nengines; i++) { + spin_lock_bh(&cesa_dev->engines[i].lock); + if (!cesa_dev->engines[i].req) + mv_cesa_dequeue_req_unlocked(&cesa_dev->engines[i]); + spin_unlock_bh(&cesa_dev->engines[i].lock); + } + + return -EINPROGRESS; +} + +static int mv_cesa_add_algs(struct mv_cesa_dev *cesa) +{ + int ret; + int i, j; + + for (i = 0; i < cesa->caps->ncipher_algs; i++) { + ret = crypto_register_alg(cesa->caps->cipher_algs[i]); + if (ret) + goto err_unregister_crypto; + } + + for (i = 0; i < cesa->caps->nahash_algs; i++) { + ret = crypto_register_ahash(cesa->caps->ahash_algs[i]); + if (ret) + goto err_unregister_ahash; + } + + return 0; + +err_unregister_ahash: + for (j = 0; j < i; j++) + crypto_unregister_ahash(cesa->caps->ahash_algs[j]); + i = cesa->caps->ncipher_algs; + +err_unregister_crypto: + for (j = 0; j < i; j++) + crypto_unregister_alg(cesa->caps->cipher_algs[j]); + + return ret; +} + +static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa) +{ + int i; + + for (i = 0; i < cesa->caps->nahash_algs; i++) + crypto_unregister_ahash(cesa->caps->ahash_algs[i]); + + for (i = 0; i < cesa->caps->ncipher_algs; i++) + crypto_unregister_alg(cesa->caps->cipher_algs[i]); +} + +static struct crypto_alg *armada_370_cipher_algs[] = { + &mv_cesa_ecb_aes_alg, + &mv_cesa_cbc_aes_alg, +}; + +static struct ahash_alg *armada_370_ahash_algs[] = { + &mv_sha1_alg, + &mv_ahmac_sha1_alg, +}; + +static const struct mv_cesa_caps armada_370_caps = { + .nengines = 1, + .cipher_algs = armada_370_cipher_algs, + .ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs), + .ahash_algs = armada_370_ahash_algs, + .nahash_algs = ARRAY_SIZE(armada_370_ahash_algs), +}; + +static const struct of_device_id mv_cesa_of_match_table[] = { + { .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps }, + {} +}; +MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table); + +static int mv_cesa_get_sram(struct platform_device *pdev, int idx) +{ + struct mv_cesa_dev *cesa = platform_get_drvdata(pdev); + struct mv_cesa_engine *engine = &cesa->engines[idx]; + const char *res_name = "sram"; + struct resource *res; + + engine->pool = of_get_named_gen_pool(cesa->dev->of_node, + "marvell,crypto-srams", + idx); + if (engine->pool) { + engine->sram = gen_pool_dma_alloc(engine->pool, + cesa->sram_size, + &engine->sram_dma); + if (engine->sram) + return 0; + + engine->pool = NULL; + return -ENOMEM; + } + + if (cesa->caps->nengines > 1) { + if (!idx) + res_name = "sram0"; + else + res_name = "sram1"; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + res_name); + if (!res || resource_size(res) < cesa->sram_size) + return -EINVAL; + + 
engine->sram = devm_ioremap_resource(cesa->dev, res); + if (IS_ERR(engine->sram)) + return PTR_ERR(engine->sram); + + engine->sram_dma = phys_to_dma(cesa->dev, + (phys_addr_t)res->start); + + return 0; +} + +static void mv_cesa_put_sram(struct platform_device *pdev, int idx) +{ + struct mv_cesa_dev *cesa = platform_get_drvdata(pdev); + struct mv_cesa_engine *engine = &cesa->engines[idx]; + + if (!engine->pool) + return; + + gen_pool_free(engine->pool, (unsigned long)engine->sram, + cesa->sram_size); +} + +static int mv_cesa_probe(struct platform_device *pdev) +{ + const struct mv_cesa_caps *caps = NULL; + const struct mbus_dram_target_info *dram; + const struct of_device_id *match; + struct device *dev = &pdev->dev; + struct mv_cesa_dev *cesa; + struct mv_cesa_engine *engines; + struct resource *res; + int irq, ret, i; + u32 sram_size; + + if (cesa_dev) { + dev_err(&pdev->dev, "Only one CESA device authorized\n"); + return -EEXIST; + } + + if (!dev->of_node) + return -ENOTSUPP; + + match = of_match_node(mv_cesa_of_match_table, dev->of_node); + if (!match || !match->data) + return -ENOTSUPP; + + caps = match->data; + + cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL); + if (!cesa) + return -ENOMEM; + + cesa->caps = caps; + cesa->dev = dev; + + sram_size = CESA_SA_DEFAULT_SRAM_SIZE; + of_property_read_u32(cesa->dev->of_node, "marvell,crypto-sram-size", + &sram_size); + if (sram_size < CESA_SA_MIN_SRAM_SIZE) + sram_size = CESA_SA_MIN_SRAM_SIZE; + + cesa->sram_size = sram_size; + cesa->engines = devm_kzalloc(dev, caps->nengines * sizeof(*engines), + GFP_KERNEL); + if (!cesa->engines) + return -ENOMEM; + + spin_lock_init(&cesa->lock); + crypto_init_queue(&cesa->queue, 50); + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); + cesa->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(cesa->regs)) + return -ENOMEM; + + dram = mv_mbus_dram_info_nooverlap(); + + platform_set_drvdata(pdev, cesa); + + for (i = 0; i < caps->nengines; i++) { + struct mv_cesa_engine *engine = &cesa->engines[i]; + char res_name[7]; + + engine->id = i; + spin_lock_init(&engine->lock); + + ret = mv_cesa_get_sram(pdev, i); + if (ret) + goto err_cleanup; + + irq = platform_get_irq(pdev, i); + if (irq < 0) { + ret = irq; + goto err_cleanup; + } + + /* + * Not all platforms can gate the CESA clocks: do not complain + * if the clock does not exist. 
+ */ + snprintf(res_name, sizeof(res_name), "cesa%d", i); + engine->clk = devm_clk_get(dev, res_name); + if (IS_ERR(engine->clk)) { + engine->clk = devm_clk_get(dev, NULL); + if (IS_ERR(engine->clk)) + engine->clk = NULL; + } + + snprintf(res_name, sizeof(res_name), "cesaz%d", i); + engine->zclk = devm_clk_get(dev, res_name); + if (IS_ERR(engine->zclk)) + engine->zclk = NULL; + + ret = clk_prepare_enable(engine->clk); + if (ret) + goto err_cleanup; + + ret = clk_prepare_enable(engine->zclk); + if (ret) + goto err_cleanup; + + engine->regs = cesa->regs + CESA_ENGINE_OFF(i); + + writel(0, cesa->engines[i].regs + CESA_SA_INT_STATUS); + writel(CESA_SA_CFG_STOP_DIG_ERR, + cesa->engines[i].regs + CESA_SA_CFG); + writel(engine->sram_dma & CESA_SA_SRAM_MSK, + cesa->engines[i].regs + CESA_SA_DESC_P0); + + ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int, + IRQF_ONESHOT, + dev_name(&pdev->dev), + &cesa->engines[i]); + if (ret) + goto err_cleanup; + } + + cesa_dev = cesa; + + ret = mv_cesa_add_algs(cesa); + if (ret) { + cesa_dev = NULL; + goto err_cleanup; + } + + dev_info(dev, "CESA device successfully registered\n"); + + return 0; + +err_cleanup: + for (i = 0; i < caps->nengines; i++) { + clk_disable_unprepare(cesa->engines[i].zclk); + clk_disable_unprepare(cesa->engines[i].clk); + mv_cesa_put_sram(pdev, i); + } + + return ret; +} + +static int mv_cesa_remove(struct platform_device *pdev) +{ + struct mv_cesa_dev *cesa = platform_get_drvdata(pdev); + int i; + + mv_cesa_remove_algs(cesa); + + for (i = 0; i < cesa->caps->nengines; i++) { + clk_disable_unprepare(cesa->engines[i].zclk); + clk_disable_unprepare(cesa->engines[i].clk); + mv_cesa_put_sram(pdev, i); + } + + return 0; +} + +static struct platform_driver marvell_cesa = { + .probe = mv_cesa_probe, + .remove = mv_cesa_remove, + .driver = { + .owner = THIS_MODULE, + .name = "marvell-cesa", + .of_match_table = mv_cesa_of_match_table, + }, +}; +module_platform_driver(marvell_cesa); + +MODULE_ALIAS("platform:mv_crypto"); +MODULE_AUTHOR("Boris Brezillon "); +MODULE_AUTHOR("Arnaud Ebalard "); +MODULE_DESCRIPTION("Support for Marvell's cryptographic engine"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h new file mode 100644 index 000000000000..f68057c7d96a --- /dev/null +++ b/drivers/crypto/marvell/cesa.h @@ -0,0 +1,554 @@ +#ifndef __MARVELL_CESA_H__ +#define __MARVELL_CESA_H__ + +#include +#include +#include + +#include + +#define CESA_ENGINE_OFF(i) (((i) * 0x2000)) + +#define CESA_TDMA_BYTE_CNT 0x800 +#define CESA_TDMA_SRC_ADDR 0x810 +#define CESA_TDMA_DST_ADDR 0x820 +#define CESA_TDMA_NEXT_ADDR 0x830 + +#define CESA_TDMA_CONTROL 0x840 +#define CESA_TDMA_DST_BURST GENMASK(2, 0) +#define CESA_TDMA_DST_BURST_32B 3 +#define CESA_TDMA_DST_BURST_128B 4 +#define CESA_TDMA_OUT_RD_EN BIT(4) +#define CESA_TDMA_SRC_BURST GENMASK(8, 6) +#define CESA_TDMA_SRC_BURST_32B (3 << 6) +#define CESA_TDMA_SRC_BURST_128B (4 << 6) +#define CESA_TDMA_CHAIN BIT(9) +#define CESA_TDMA_BYTE_SWAP BIT(11) +#define CESA_TDMA_NO_BYTE_SWAP BIT(11) +#define CESA_TDMA_EN BIT(12) +#define CESA_TDMA_FETCH_ND BIT(13) +#define CESA_TDMA_ACT BIT(14) + +#define CESA_TDMA_CUR 0x870 +#define CESA_TDMA_ERROR_CAUSE 0x8c8 +#define CESA_TDMA_ERROR_MSK 0x8cc + +#define CESA_TDMA_WINDOW_BASE(x) (((x) * 0x8) + 0xa00) +#define CESA_TDMA_WINDOW_CTRL(x) (((x) * 0x8) + 0xa04) + +#define CESA_IVDIG(x) (0xdd00 + ((x) * 4) + \ + (((x) < 5) ? 
0 : 0x14)) + +#define CESA_SA_CMD 0xde00 +#define CESA_SA_CMD_EN_CESA_SA_ACCL0 BIT(0) +#define CESA_SA_CMD_EN_CESA_SA_ACCL1 BIT(1) +#define CESA_SA_CMD_DISABLE_SEC BIT(2) + +#define CESA_SA_DESC_P0 0xde04 + +#define CESA_SA_DESC_P1 0xde14 + +#define CESA_SA_CFG 0xde08 +#define CESA_SA_CFG_STOP_DIG_ERR GENMASK(1, 0) +#define CESA_SA_CFG_DIG_ERR_CONT 0 +#define CESA_SA_CFG_DIG_ERR_SKIP 1 +#define CESA_SA_CFG_DIG_ERR_STOP 3 +#define CESA_SA_CFG_CH0_W_IDMA BIT(7) +#define CESA_SA_CFG_CH1_W_IDMA BIT(8) +#define CESA_SA_CFG_ACT_CH0_IDMA BIT(9) +#define CESA_SA_CFG_ACT_CH1_IDMA BIT(10) +#define CESA_SA_CFG_MULTI_PKT BIT(11) +#define CESA_SA_CFG_PARA_DIS BIT(13) + +#define CESA_SA_ACCEL_STATUS 0xde0c +#define CESA_SA_ST_ACT_0 BIT(0) +#define CESA_SA_ST_ACT_1 BIT(1) + +/* + * CESA_SA_FPGA_INT_STATUS looks like a FPGA leftover and is documented only + * in Errata 4.12. It looks like that it was part of an IRQ-controller in FPGA + * and someone forgot to remove it while switching to the core and moving to + * CESA_SA_INT_STATUS. + */ +#define CESA_SA_FPGA_INT_STATUS 0xdd68 +#define CESA_SA_INT_STATUS 0xde20 +#define CESA_SA_INT_AUTH_DONE BIT(0) +#define CESA_SA_INT_DES_E_DONE BIT(1) +#define CESA_SA_INT_AES_E_DONE BIT(2) +#define CESA_SA_INT_AES_D_DONE BIT(3) +#define CESA_SA_INT_ENC_DONE BIT(4) +#define CESA_SA_INT_ACCEL0_DONE BIT(5) +#define CESA_SA_INT_ACCEL1_DONE BIT(6) +#define CESA_SA_INT_ACC0_IDMA_DONE BIT(7) +#define CESA_SA_INT_ACC1_IDMA_DONE BIT(8) +#define CESA_SA_INT_IDMA_DONE BIT(9) +#define CESA_SA_INT_IDMA_OWN_ERR BIT(10) + +#define CESA_SA_INT_MSK 0xde24 + +#define CESA_SA_DESC_CFG_OP_MAC_ONLY 0 +#define CESA_SA_DESC_CFG_OP_CRYPT_ONLY 1 +#define CESA_SA_DESC_CFG_OP_MAC_CRYPT 2 +#define CESA_SA_DESC_CFG_OP_CRYPT_MAC 3 +#define CESA_SA_DESC_CFG_OP_MSK GENMASK(1, 0) +#define CESA_SA_DESC_CFG_MACM_SHA256 (1 << 4) +#define CESA_SA_DESC_CFG_MACM_HMAC_SHA256 (3 << 4) +#define CESA_SA_DESC_CFG_MACM_MD5 (4 << 4) +#define CESA_SA_DESC_CFG_MACM_SHA1 (5 << 4) +#define CESA_SA_DESC_CFG_MACM_HMAC_MD5 (6 << 4) +#define CESA_SA_DESC_CFG_MACM_HMAC_SHA1 (7 << 4) +#define CESA_SA_DESC_CFG_MACM_MSK GENMASK(6, 4) +#define CESA_SA_DESC_CFG_CRYPTM_DES (1 << 8) +#define CESA_SA_DESC_CFG_CRYPTM_3DES (2 << 8) +#define CESA_SA_DESC_CFG_CRYPTM_AES (3 << 8) +#define CESA_SA_DESC_CFG_CRYPTM_MSK GENMASK(9, 8) +#define CESA_SA_DESC_CFG_DIR_ENC (0 << 12) +#define CESA_SA_DESC_CFG_DIR_DEC (1 << 12) +#define CESA_SA_DESC_CFG_CRYPTCM_ECB (0 << 16) +#define CESA_SA_DESC_CFG_CRYPTCM_CBC (1 << 16) +#define CESA_SA_DESC_CFG_CRYPTCM_MSK BIT(16) +#define CESA_SA_DESC_CFG_3DES_EEE (0 << 20) +#define CESA_SA_DESC_CFG_3DES_EDE (1 << 20) +#define CESA_SA_DESC_CFG_AES_LEN_128 (0 << 24) +#define CESA_SA_DESC_CFG_AES_LEN_192 (1 << 24) +#define CESA_SA_DESC_CFG_AES_LEN_256 (2 << 24) +#define CESA_SA_DESC_CFG_AES_LEN_MSK GENMASK(25, 24) +#define CESA_SA_DESC_CFG_NOT_FRAG (0 << 30) +#define CESA_SA_DESC_CFG_FIRST_FRAG (1 << 30) +#define CESA_SA_DESC_CFG_LAST_FRAG (2 << 30) +#define CESA_SA_DESC_CFG_MID_FRAG (3 << 30) +#define CESA_SA_DESC_CFG_FRAG_MSK GENMASK(31, 30) + +/* + * /-----------\ 0 + * | ACCEL CFG | 4 * 8 + * |-----------| 0x20 + * | CRYPT KEY | 8 * 4 + * |-----------| 0x40 + * | IV IN | 4 * 4 + * |-----------| 0x40 (inplace) + * | IV BUF | 4 * 4 + * |-----------| 0x80 + * | DATA IN | 16 * x (max ->max_req_size) + * |-----------| 0x80 (inplace operation) + * | DATA OUT | 16 * x (max ->max_req_size) + * \-----------/ SRAM size + */ + +/* + * Hashing memory map: + * /-----------\ 0 + * | ACCEL CFG | 4 * 8 + * |-----------| 
0x20 + * | Inner IV | 8 * 4 + * |-----------| 0x40 + * | Outer IV | 8 * 4 + * |-----------| 0x60 + * | Output BUF| 8 * 4 + * |-----------| 0x80 + * | DATA IN | 64 * x (max ->max_req_size) + * \-----------/ SRAM size + */ + +#define CESA_SA_CFG_SRAM_OFFSET 0x00 +#define CESA_SA_DATA_SRAM_OFFSET 0x80 + +#define CESA_SA_CRYPT_KEY_SRAM_OFFSET 0x20 +#define CESA_SA_CRYPT_IV_SRAM_OFFSET 0x40 + +#define CESA_SA_MAC_IIV_SRAM_OFFSET 0x20 +#define CESA_SA_MAC_OIV_SRAM_OFFSET 0x40 +#define CESA_SA_MAC_DIG_SRAM_OFFSET 0x60 + +#define CESA_SA_DESC_CRYPT_DATA(offset) \ + cpu_to_le32((CESA_SA_DATA_SRAM_OFFSET + (offset)) | \ + ((CESA_SA_DATA_SRAM_OFFSET + (offset)) << 16)) + +#define CESA_SA_DESC_CRYPT_IV(offset) \ + cpu_to_le32((CESA_SA_CRYPT_IV_SRAM_OFFSET + (offset)) | \ + ((CESA_SA_CRYPT_IV_SRAM_OFFSET + (offset)) << 16)) + +#define CESA_SA_DESC_CRYPT_KEY(offset) \ + cpu_to_le32(CESA_SA_CRYPT_KEY_SRAM_OFFSET + (offset)) + +#define CESA_SA_DESC_MAC_DATA(offset) \ + cpu_to_le32(CESA_SA_DATA_SRAM_OFFSET + (offset)) +#define CESA_SA_DESC_MAC_DATA_MSK GENMASK(15, 0) + +#define CESA_SA_DESC_MAC_TOTAL_LEN(total_len) cpu_to_le32((total_len) << 16) +#define CESA_SA_DESC_MAC_TOTAL_LEN_MSK GENMASK(31, 16) + +#define CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX 0xffff + +#define CESA_SA_DESC_MAC_DIGEST(offset) \ + cpu_to_le32(CESA_SA_MAC_DIG_SRAM_OFFSET + (offset)) +#define CESA_SA_DESC_MAC_DIGEST_MSK GENMASK(15, 0) + +#define CESA_SA_DESC_MAC_FRAG_LEN(frag_len) cpu_to_le32((frag_len) << 16) +#define CESA_SA_DESC_MAC_FRAG_LEN_MSK GENMASK(31, 16) + +#define CESA_SA_DESC_MAC_IV(offset) \ + cpu_to_le32((CESA_SA_MAC_IIV_SRAM_OFFSET + (offset)) | \ + ((CESA_SA_MAC_OIV_SRAM_OFFSET + (offset)) << 16)) + +#define CESA_SA_SRAM_SIZE 2048 +#define CESA_SA_SRAM_PAYLOAD_SIZE (cesa_dev->sram_size - \ + CESA_SA_DATA_SRAM_OFFSET) + +#define CESA_SA_DEFAULT_SRAM_SIZE 2048 +#define CESA_SA_MIN_SRAM_SIZE 1024 + +#define CESA_SA_SRAM_MSK (2048 - 1) + +#define CESA_MAX_HASH_BLOCK_SIZE 64 +#define CESA_HASH_BLOCK_SIZE_MSK (CESA_MAX_HASH_BLOCK_SIZE - 1) + +/** + * struct mv_cesa_sec_accel_desc - security accelerator descriptor + * @config: engine config + * @enc_p: input and output data pointers for a cipher operation + * @enc_len: cipher operation length + * @enc_key_p: cipher key pointer + * @enc_iv: cipher IV pointers + * @mac_src_p: input pointer and total hash length + * @mac_digest: digest pointer and hash operation length + * @mac_iv: hmac IV pointers + * + * Structure passed to the CESA engine to describe the crypto operation + * to be executed. + */ +struct mv_cesa_sec_accel_desc { + u32 config; + u32 enc_p; + u32 enc_len; + u32 enc_key_p; + u32 enc_iv; + u32 mac_src_p; + u32 mac_digest; + u32 mac_iv; +}; + +/** + * struct mv_cesa_blkcipher_op_ctx - cipher operation context + * @key: cipher key + * @iv: cipher IV + * + * Context associated to a cipher operation. + */ +struct mv_cesa_blkcipher_op_ctx { + u32 key[8]; + u32 iv[4]; +}; + +/** + * struct mv_cesa_hash_op_ctx - hash or hmac operation context + * @key: cipher key + * @iv: cipher IV + * + * Context associated to an hash or hmac operation. + */ +struct mv_cesa_hash_op_ctx { + u32 iv[16]; + u32 hash[8]; +}; + +/** + * struct mv_cesa_op_ctx - crypto operation context + * @desc: CESA descriptor + * @ctx: context associated to the crypto operation + * + * Context associated to a crypto operation. 
+ */ +struct mv_cesa_op_ctx { + struct mv_cesa_sec_accel_desc desc; + union { + struct mv_cesa_blkcipher_op_ctx blkcipher; + struct mv_cesa_hash_op_ctx hash; + } ctx; +}; + +struct mv_cesa_engine; + +/** + * struct mv_cesa_caps - CESA device capabilities + * @engines: number of engines + * @cipher_algs: supported cipher algorithms + * @ncipher_algs: number of supported cipher algorithms + * @ahash_algs: supported hash algorithms + * @nahash_algs: number of supported hash algorithms + * + * Structure used to describe CESA device capabilities. + */ +struct mv_cesa_caps { + int nengines; + struct crypto_alg **cipher_algs; + int ncipher_algs; + struct ahash_alg **ahash_algs; + int nahash_algs; +}; + +/** + * struct mv_cesa_dev - CESA device + * @caps: device capabilities + * @regs: device registers + * @sram_size: usable SRAM size + * @lock: device lock + * @queue: crypto request queue + * @engines: array of engines + * + * Structure storing CESA device information. + */ +struct mv_cesa_dev { + const struct mv_cesa_caps *caps; + void __iomem *regs; + struct device *dev; + unsigned int sram_size; + spinlock_t lock; + struct crypto_queue queue; + struct mv_cesa_engine *engines; +}; + +/** + * struct mv_cesa_engine - CESA engine + * @id: engine id + * @regs: engine registers + * @sram: SRAM memory region + * @sram_dma: DMA address of the SRAM memory region + * @lock: engine lock + * @req: current crypto request + * @clk: engine clk + * @zclk: engine zclk + * @max_req_len: maximum chunk length (useful to create the TDMA chain) + * @int_mask: interrupt mask cache + * @pool: memory pool pointing to the memory region reserved in + * SRAM + * + * Structure storing CESA engine information. + */ +struct mv_cesa_engine { + int id; + void __iomem *regs; + void __iomem *sram; + dma_addr_t sram_dma; + spinlock_t lock; + struct crypto_async_request *req; + struct clk *clk; + struct clk *zclk; + size_t max_req_len; + u32 int_mask; + struct gen_pool *pool; +}; + +/** + * struct mv_cesa_req_ops - CESA request operations + * @prepare: prepare a request to be executed on the specified engine + * @process: process a request chunk result (should return 0 if the + * operation, -EINPROGRESS if it needs more steps or an error + * code) + * @step: launch the crypto operation on the next chunk + * @cleanup: cleanup the crypto request (release associated data) + */ +struct mv_cesa_req_ops { + void (*prepare)(struct crypto_async_request *req, + struct mv_cesa_engine *engine); + int (*process)(struct crypto_async_request *req, u32 status); + void (*step)(struct crypto_async_request *req); + void (*cleanup)(struct crypto_async_request *req); +}; + +/** + * struct mv_cesa_ctx - CESA operation context + * @ops: crypto operations + * + * Base context structure inherited by operation specific ones. + */ +struct mv_cesa_ctx { + const struct mv_cesa_req_ops *ops; +}; + +/** + * struct mv_cesa_hash_ctx - CESA hash operation context + * @base: base context structure + * + * Hash context structure. + */ +struct mv_cesa_hash_ctx { + struct mv_cesa_ctx base; +}; + +/** + * struct mv_cesa_hash_ctx - CESA hmac operation context + * @base: base context structure + * @iv: initialization vectors + * + * HMAC context structure. 
+ */ +struct mv_cesa_hmac_ctx { + struct mv_cesa_ctx base; + u32 iv[16]; +}; + +/** + * enum mv_cesa_req_type - request type definitions + * @CESA_STD_REQ: standard request + */ +enum mv_cesa_req_type { + CESA_STD_REQ, +}; + +/** + * struct mv_cesa_req - CESA request + * @type: request type + * @engine: engine associated with this request + */ +struct mv_cesa_req { + enum mv_cesa_req_type type; + struct mv_cesa_engine *engine; +}; + +/** + * struct mv_cesa_ablkcipher_std_req - cipher standard request + * @base: base information + * @op: operation context + * @offset: current operation offset + * @size: size of the crypto operation + */ +struct mv_cesa_ablkcipher_std_req { + struct mv_cesa_req base; + struct mv_cesa_op_ctx op; + unsigned int offset; + unsigned int size; + bool skip_ctx; +}; + +/** + * struct mv_cesa_ablkcipher_req - cipher request + * @req: type specific request information + * @src_nents: number of entries in the src sg list + * @dst_nents: number of entries in the dest sg list + */ +struct mv_cesa_ablkcipher_req { + union { + struct mv_cesa_req base; + struct mv_cesa_ablkcipher_std_req std; + } req; + int src_nents; + int dst_nents; +}; + +/** + * struct mv_cesa_ahash_std_req - standard hash request + * @base: base information + * @offset: current operation offset + */ +struct mv_cesa_ahash_std_req { + struct mv_cesa_req base; + unsigned int offset; +}; + +/** + * struct mv_cesa_ahash_req - hash request + * @req: type specific request information + * @cache: cache buffer + * @cache_ptr: write pointer in the cache buffer + * @len: hash total length + * @src_nents: number of entries in the scatterlist + * @last_req: define whether the current operation is the last one + * or not + * @state: hash state + */ +struct mv_cesa_ahash_req { + union { + struct mv_cesa_req base; + struct mv_cesa_ahash_std_req std; + } req; + struct mv_cesa_op_ctx op_tmpl; + u8 *cache; + unsigned int cache_ptr; + u64 len; + int src_nents; + bool last_req; + __be32 state[8]; +}; + +/* CESA functions */ + +extern struct mv_cesa_dev *cesa_dev; + +static inline void mv_cesa_update_op_cfg(struct mv_cesa_op_ctx *op, + u32 cfg, u32 mask) +{ + op->desc.config &= cpu_to_le32(~mask); + op->desc.config |= cpu_to_le32(cfg); +} + +static inline u32 mv_cesa_get_op_cfg(struct mv_cesa_op_ctx *op) +{ + return le32_to_cpu(op->desc.config); +} + +static inline void mv_cesa_set_op_cfg(struct mv_cesa_op_ctx *op, u32 cfg) +{ + op->desc.config = cpu_to_le32(cfg); +} + +static inline void mv_cesa_adjust_op(struct mv_cesa_engine *engine, + struct mv_cesa_op_ctx *op) +{ + u32 offset = engine->sram_dma & CESA_SA_SRAM_MSK; + + op->desc.enc_p = CESA_SA_DESC_CRYPT_DATA(offset); + op->desc.enc_key_p = CESA_SA_DESC_CRYPT_KEY(offset); + op->desc.enc_iv = CESA_SA_DESC_CRYPT_IV(offset); + op->desc.mac_src_p &= ~CESA_SA_DESC_MAC_DATA_MSK; + op->desc.mac_src_p |= CESA_SA_DESC_MAC_DATA(offset); + op->desc.mac_digest &= ~CESA_SA_DESC_MAC_DIGEST_MSK; + op->desc.mac_digest |= CESA_SA_DESC_MAC_DIGEST(offset); + op->desc.mac_iv = CESA_SA_DESC_MAC_IV(offset); +} + +static inline void mv_cesa_set_crypt_op_len(struct mv_cesa_op_ctx *op, int len) +{ + op->desc.enc_len = cpu_to_le32(len); +} + +static inline void mv_cesa_set_mac_op_total_len(struct mv_cesa_op_ctx *op, + int len) +{ + op->desc.mac_src_p &= ~CESA_SA_DESC_MAC_TOTAL_LEN_MSK; + op->desc.mac_src_p |= CESA_SA_DESC_MAC_TOTAL_LEN(len); +} + +static inline void mv_cesa_set_mac_op_frag_len(struct mv_cesa_op_ctx *op, + int len) +{ + op->desc.mac_digest &= ~CESA_SA_DESC_MAC_FRAG_LEN_MSK; + 
op->desc.mac_digest |= CESA_SA_DESC_MAC_FRAG_LEN(len); +} + +static inline void mv_cesa_set_int_mask(struct mv_cesa_engine *engine, + u32 int_mask) +{ + if (int_mask == engine->int_mask) + return; + + writel(int_mask, engine->regs + CESA_SA_INT_MSK); + engine->int_mask = int_mask; +} + +static inline u32 mv_cesa_get_int_mask(struct mv_cesa_engine *engine) +{ + return engine->int_mask; +} + +int mv_cesa_queue_req(struct crypto_async_request *req); + +/* Algorithm definitions */ + +extern struct ahash_alg mv_sha1_alg; +extern struct ahash_alg mv_ahmac_sha1_alg; + +extern struct crypto_alg mv_cesa_ecb_aes_alg; +extern struct crypto_alg mv_cesa_cbc_aes_alg; + +#endif /* __MARVELL_CESA_H__ */ diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c new file mode 100644 index 000000000000..e6eea488f11e --- /dev/null +++ b/drivers/crypto/marvell/cipher.c @@ -0,0 +1,331 @@ +/* + * Cipher algorithms supported by the CESA: DES, 3DES and AES. + * + * Author: Boris Brezillon + * Author: Arnaud Ebalard + * + * This work is based on an initial version written by + * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc > + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#include + +#include "cesa.h" + +struct mv_cesa_aes_ctx { + struct mv_cesa_ctx base; + struct crypto_aes_ctx aes; +}; + +static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req) +{ + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); + struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std; + struct mv_cesa_engine *engine = sreq->base.engine; + size_t len = min_t(size_t, req->nbytes - sreq->offset, + CESA_SA_SRAM_PAYLOAD_SIZE); + + len = sg_pcopy_to_buffer(req->src, creq->src_nents, + engine->sram + CESA_SA_DATA_SRAM_OFFSET, + len, sreq->offset); + + sreq->size = len; + mv_cesa_set_crypt_op_len(&sreq->op, len); + + /* FIXME: only update enc_len field */ + if (!sreq->skip_ctx) { + memcpy(engine->sram, &sreq->op, sizeof(sreq->op)); + sreq->skip_ctx = true; + } else { + memcpy(engine->sram, &sreq->op, sizeof(sreq->op.desc)); + } + + mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE); + writel(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG); + writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD); +} + +static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req, + u32 status) +{ + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); + struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std; + struct mv_cesa_engine *engine = sreq->base.engine; + size_t len; + + len = sg_pcopy_from_buffer(req->dst, creq->dst_nents, + engine->sram + CESA_SA_DATA_SRAM_OFFSET, + sreq->size, sreq->offset); + + sreq->offset += len; + if (sreq->offset < req->nbytes) + return -EINPROGRESS; + + return 0; +} + +static int mv_cesa_ablkcipher_process(struct crypto_async_request *req, + u32 status) +{ + struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); + struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std; + struct mv_cesa_engine *engine = sreq->base.engine; + int ret; + + ret = mv_cesa_ablkcipher_std_process(ablkreq, status); + if (ret) + return ret; + + memcpy(ablkreq->info, engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET, + crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq))); + + return 0; +} + +static void 
mv_cesa_ablkcipher_step(struct crypto_async_request *req) +{ + struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); + + mv_cesa_ablkcipher_std_step(ablkreq); +} + +static inline void +mv_cesa_ablkcipher_std_prepare(struct ablkcipher_request *req) +{ + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); + struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std; + struct mv_cesa_engine *engine = sreq->base.engine; + + sreq->size = 0; + sreq->offset = 0; + mv_cesa_adjust_op(engine, &sreq->op); + memcpy(engine->sram, &sreq->op, sizeof(sreq->op)); +} + +static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req, + struct mv_cesa_engine *engine) +{ + struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); + + creq->req.base.engine = engine; + + mv_cesa_ablkcipher_std_prepare(ablkreq); +} + +static inline void +mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req) +{ +} + +static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = { + .step = mv_cesa_ablkcipher_step, + .process = mv_cesa_ablkcipher_process, + .prepare = mv_cesa_ablkcipher_prepare, + .cleanup = mv_cesa_ablkcipher_req_cleanup, +}; + +static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm *tfm) +{ + struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm); + + ctx->base.ops = &mv_cesa_ablkcipher_req_ops; + + tfm->crt_ablkcipher.reqsize = sizeof(struct mv_cesa_ablkcipher_req); + + return 0; +} + +static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, + unsigned int len) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm); + int remaining; + int offset; + int ret; + int i; + + ret = crypto_aes_expand_key(&ctx->aes, key, len); + if (ret) { + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return ret; + } + + remaining = (ctx->aes.key_length - 16) / 4; + offset = ctx->aes.key_length + 24 - remaining; + for (i = 0; i < remaining; i++) + ctx->aes.key_dec[4 + i] = + cpu_to_le32(ctx->aes.key_enc[offset + i]); + + return 0; +} + +static inline int +mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req, + const struct mv_cesa_op_ctx *op_templ) +{ + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); + struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std; + + sreq->base.type = CESA_STD_REQ; + sreq->op = *op_templ; + sreq->skip_ctx = false; + + return 0; +} + +static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req, + struct mv_cesa_op_ctx *tmpl) +{ + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + unsigned int blksize = crypto_ablkcipher_blocksize(tfm); + + if (!IS_ALIGNED(req->nbytes, blksize)) + return -EINVAL; + + creq->src_nents = sg_nents_for_len(req->src, req->nbytes); + creq->dst_nents = sg_nents_for_len(req->dst, req->nbytes); + + mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY, + CESA_SA_DESC_CFG_OP_MSK); + + return mv_cesa_ablkcipher_std_req_init(req, tmpl); +} + +static int mv_cesa_aes_op(struct ablkcipher_request *req, + struct mv_cesa_op_ctx *tmpl) +{ + struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + int ret, i; + u32 *key; + u32 cfg; + + cfg = CESA_SA_DESC_CFG_CRYPTM_AES; + + if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC) + key = ctx->aes.key_dec; + else + key = ctx->aes.key_enc; + + for (i = 0; i < ctx->aes.key_length / 
sizeof(u32); i++) + tmpl->ctx.blkcipher.key[i] = cpu_to_le32(key[i]); + + if (ctx->aes.key_length == 24) + cfg |= CESA_SA_DESC_CFG_AES_LEN_192; + else if (ctx->aes.key_length == 32) + cfg |= CESA_SA_DESC_CFG_AES_LEN_256; + + mv_cesa_update_op_cfg(tmpl, cfg, + CESA_SA_DESC_CFG_CRYPTM_MSK | + CESA_SA_DESC_CFG_AES_LEN_MSK); + + ret = mv_cesa_ablkcipher_req_init(req, tmpl); + if (ret) + return ret; + + return mv_cesa_queue_req(&req->base); +} + +static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, + CESA_SA_DESC_CFG_CRYPTCM_ECB | + CESA_SA_DESC_CFG_DIR_ENC); + + return mv_cesa_aes_op(req, &tmpl); +} + +static int mv_cesa_ecb_aes_decrypt(struct ablkcipher_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, + CESA_SA_DESC_CFG_CRYPTCM_ECB | + CESA_SA_DESC_CFG_DIR_DEC); + + return mv_cesa_aes_op(req, &tmpl); +} + +struct crypto_alg mv_cesa_ecb_aes_alg = { + .cra_name = "ecb(aes)", + .cra_driver_name = "mv-ecb-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = mv_cesa_ablkcipher_cra_init, + .cra_u = { + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = mv_cesa_aes_setkey, + .encrypt = mv_cesa_ecb_aes_encrypt, + .decrypt = mv_cesa_ecb_aes_decrypt, + }, + }, +}; + +static int mv_cesa_cbc_aes_op(struct ablkcipher_request *req, + struct mv_cesa_op_ctx *tmpl) +{ + mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC, + CESA_SA_DESC_CFG_CRYPTCM_MSK); + memcpy(tmpl->ctx.blkcipher.iv, req->info, AES_BLOCK_SIZE); + + return mv_cesa_aes_op(req, tmpl); +} + +static int mv_cesa_cbc_aes_encrypt(struct ablkcipher_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC); + + return mv_cesa_cbc_aes_op(req, &tmpl); +} + +static int mv_cesa_cbc_aes_decrypt(struct ablkcipher_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC); + + return mv_cesa_cbc_aes_op(req, &tmpl); +} + +struct crypto_alg mv_cesa_cbc_aes_alg = { + .cra_name = "cbc(aes)", + .cra_driver_name = "mv-cbc-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = mv_cesa_ablkcipher_cra_init, + .cra_u = { + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = mv_cesa_aes_setkey, + .encrypt = mv_cesa_cbc_aes_encrypt, + .decrypt = mv_cesa_cbc_aes_decrypt, + }, + }, +}; diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c new file mode 100644 index 000000000000..2d33f6816c31 --- /dev/null +++ b/drivers/crypto/marvell/hash.c @@ -0,0 +1,720 @@ +/* + * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256. 
+ * + * Author: Boris Brezillon + * Author: Arnaud Ebalard + * + * This work is based on an initial version written by + * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc > + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#include + +#include "cesa.h" + +static inline int mv_cesa_ahash_std_alloc_cache(struct mv_cesa_ahash_req *creq, + gfp_t flags) +{ + creq->cache = kzalloc(CESA_MAX_HASH_BLOCK_SIZE, flags); + if (!creq->cache) + return -ENOMEM; + + return 0; +} + +static int mv_cesa_ahash_alloc_cache(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC; + + if (creq->cache) + return 0; + + return mv_cesa_ahash_std_alloc_cache(creq, flags); +} + +static inline void mv_cesa_ahash_std_free_cache(struct mv_cesa_ahash_req *creq) +{ + kfree(creq->cache); +} + +static void mv_cesa_ahash_free_cache(struct mv_cesa_ahash_req *creq) +{ + if (!creq->cache) + return; + + mv_cesa_ahash_std_free_cache(creq); + + creq->cache = NULL; +} + +static void mv_cesa_ahash_last_cleanup(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + + mv_cesa_ahash_free_cache(creq); +} + +static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq) +{ + unsigned int index, padlen; + + index = creq->len & CESA_HASH_BLOCK_SIZE_MSK; + padlen = (index < 56) ? (56 - index) : (64 + 56 - index); + + return padlen; +} + +static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf) +{ + __be64 bits = cpu_to_be64(creq->len << 3); + unsigned int index, padlen; + + buf[0] = 0x80; + /* Pad out to 56 mod 64 */ + index = creq->len & CESA_HASH_BLOCK_SIZE_MSK; + padlen = mv_cesa_ahash_pad_len(creq); + memset(buf + 1, 0, padlen - 1); + memcpy(buf + padlen, &bits, sizeof(bits)); + + return padlen + 8; +} + +static void mv_cesa_ahash_std_step(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + struct mv_cesa_ahash_std_req *sreq = &creq->req.std; + struct mv_cesa_engine *engine = sreq->base.engine; + struct mv_cesa_op_ctx *op; + unsigned int new_cache_ptr = 0; + u32 frag_mode; + size_t len; + + if (creq->cache_ptr) + memcpy(engine->sram + CESA_SA_DATA_SRAM_OFFSET, creq->cache, + creq->cache_ptr); + + len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset, + CESA_SA_SRAM_PAYLOAD_SIZE); + + if (!creq->last_req) { + new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK; + len &= ~CESA_HASH_BLOCK_SIZE_MSK; + } + + if (len - creq->cache_ptr) + sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents, + engine->sram + + CESA_SA_DATA_SRAM_OFFSET + + creq->cache_ptr, + len - creq->cache_ptr, + sreq->offset); + + op = &creq->op_tmpl; + + frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK; + + if (creq->last_req && sreq->offset == req->nbytes && + creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) { + if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG) + frag_mode = CESA_SA_DESC_CFG_NOT_FRAG; + else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG) + frag_mode = CESA_SA_DESC_CFG_LAST_FRAG; + } + + if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG || + frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) { + if (len && + creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) { + mv_cesa_set_mac_op_total_len(op, creq->len); + } else { + int trailerlen = mv_cesa_ahash_pad_len(creq) + 8; + + if (len + 
trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) { + len &= CESA_HASH_BLOCK_SIZE_MSK; + new_cache_ptr = 64 - trailerlen; + memcpy(creq->cache, + engine->sram + + CESA_SA_DATA_SRAM_OFFSET + len, + new_cache_ptr); + } else { + len += mv_cesa_ahash_pad_req(creq, + engine->sram + len + + CESA_SA_DATA_SRAM_OFFSET); + } + + if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) + frag_mode = CESA_SA_DESC_CFG_MID_FRAG; + else + frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG; + } + } + + mv_cesa_set_mac_op_frag_len(op, len); + mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK); + + /* FIXME: only update enc_len field */ + memcpy(engine->sram, op, sizeof(*op)); + + if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG) + mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG, + CESA_SA_DESC_CFG_FRAG_MSK); + + creq->cache_ptr = new_cache_ptr; + + mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE); + writel(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG); + writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD); +} + +static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + struct mv_cesa_ahash_std_req *sreq = &creq->req.std; + + if (sreq->offset < (req->nbytes - creq->cache_ptr)) + return -EINPROGRESS; + + return 0; +} + +static void mv_cesa_ahash_std_prepare(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + struct mv_cesa_ahash_std_req *sreq = &creq->req.std; + struct mv_cesa_engine *engine = sreq->base.engine; + + sreq->offset = 0; + mv_cesa_adjust_op(engine, &creq->op_tmpl); + memcpy(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl)); +} + +static void mv_cesa_ahash_step(struct crypto_async_request *req) +{ + struct ahash_request *ahashreq = ahash_request_cast(req); + + mv_cesa_ahash_std_step(ahashreq); +} + +static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status) +{ + struct ahash_request *ahashreq = ahash_request_cast(req); + struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq); + struct mv_cesa_engine *engine = creq->req.base.engine; + unsigned int digsize; + int ret, i; + + ret = mv_cesa_ahash_std_process(ahashreq, status); + if (ret == -EINPROGRESS) + return ret; + + digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq)); + for (i = 0; i < digsize / 4; i++) + creq->state[i] = readl(engine->regs + CESA_IVDIG(i)); + + if (creq->cache_ptr) + sg_pcopy_to_buffer(ahashreq->src, creq->src_nents, + creq->cache, + creq->cache_ptr, + ahashreq->nbytes - creq->cache_ptr); + + if (creq->last_req) { + for (i = 0; i < digsize / 4; i++) + creq->state[i] = cpu_to_be32(creq->state[i]); + + memcpy(ahashreq->result, creq->state, digsize); + } + + return ret; +} + +static void mv_cesa_ahash_prepare(struct crypto_async_request *req, + struct mv_cesa_engine *engine) +{ + struct ahash_request *ahashreq = ahash_request_cast(req); + struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq); + unsigned int digsize; + int i; + + creq->req.base.engine = engine; + + mv_cesa_ahash_std_prepare(ahashreq); + + digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq)); + for (i = 0; i < digsize / 4; i++) + writel(creq->state[i], + engine->regs + CESA_IVDIG(i)); +} + +static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req) +{ + struct ahash_request *ahashreq = ahash_request_cast(req); + struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq); + + if (creq->last_req) + mv_cesa_ahash_last_cleanup(ahashreq); +} + +static const struct 
mv_cesa_req_ops mv_cesa_ahash_req_ops = { + .step = mv_cesa_ahash_step, + .process = mv_cesa_ahash_process, + .prepare = mv_cesa_ahash_prepare, + .cleanup = mv_cesa_ahash_req_cleanup, +}; + +static int mv_cesa_ahash_init(struct ahash_request *req, + struct mv_cesa_op_ctx *tmpl) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + + memset(creq, 0, sizeof(*creq)); + mv_cesa_update_op_cfg(tmpl, + CESA_SA_DESC_CFG_OP_MAC_ONLY | + CESA_SA_DESC_CFG_FIRST_FRAG, + CESA_SA_DESC_CFG_OP_MSK | + CESA_SA_DESC_CFG_FRAG_MSK); + mv_cesa_set_mac_op_total_len(tmpl, 0); + mv_cesa_set_mac_op_frag_len(tmpl, 0); + creq->op_tmpl = *tmpl; + creq->len = 0; + + return 0; +} + +static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm) +{ + struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm); + + ctx->base.ops = &mv_cesa_ahash_req_ops; + + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct mv_cesa_ahash_req)); + return 0; +} + +static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + int ret; + + if (((creq->cache_ptr + req->nbytes) & CESA_HASH_BLOCK_SIZE_MSK) && + !creq->last_req) { + ret = mv_cesa_ahash_alloc_cache(req); + if (ret) + return ret; + } + + if (creq->cache_ptr + req->nbytes < 64 && !creq->last_req) { + *cached = true; + + if (!req->nbytes) + return 0; + + sg_pcopy_to_buffer(req->src, creq->src_nents, + creq->cache + creq->cache_ptr, + req->nbytes, 0); + + creq->cache_ptr += req->nbytes; + } + + return 0; +} + +static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + + creq->req.base.type = CESA_STD_REQ; + creq->src_nents = sg_nents_for_len(req->src, req->nbytes); + + return mv_cesa_ahash_cache_req(req, cached); +} + +static int mv_cesa_ahash_update(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + bool cached = false; + int ret; + + creq->len += req->nbytes; + ret = mv_cesa_ahash_req_init(req, &cached); + if (ret) + return ret; + + if (cached) + return 0; + + return mv_cesa_queue_req(&req->base); +} + +static int mv_cesa_ahash_final(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl; + bool cached = false; + int ret; + + mv_cesa_set_mac_op_total_len(tmpl, creq->len); + creq->last_req = true; + req->nbytes = 0; + + ret = mv_cesa_ahash_req_init(req, &cached); + if (ret) + return ret; + + if (cached) + return 0; + + return mv_cesa_queue_req(&req->base); +} + +static int mv_cesa_ahash_finup(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl; + bool cached = false; + int ret; + + creq->len += req->nbytes; + mv_cesa_set_mac_op_total_len(tmpl, creq->len); + creq->last_req = true; + + ret = mv_cesa_ahash_req_init(req, &cached); + if (ret) + return ret; + + if (cached) + return 0; + + return mv_cesa_queue_req(&req->base); +} + +static int mv_cesa_sha1_init(struct ahash_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1); + + mv_cesa_ahash_init(req, &tmpl); + + return 0; +} + +static int mv_cesa_sha1_export(struct ahash_request *req, void *out) +{ + struct sha1_state *out_state = out; + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + unsigned int digsize = crypto_ahash_digestsize(ahash); + + 
out_state->count = creq->len; + memcpy(out_state->state, creq->state, digsize); + memset(out_state->buffer, 0, sizeof(out_state->buffer)); + if (creq->cache) + memcpy(out_state->buffer, creq->cache, creq->cache_ptr); + + return 0; +} + +static int mv_cesa_sha1_import(struct ahash_request *req, const void *in) +{ + const struct sha1_state *in_state = in; + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + unsigned int digsize = crypto_ahash_digestsize(ahash); + unsigned int cache_ptr; + int ret; + + creq->len = in_state->count; + memcpy(creq->state, in_state->state, digsize); + creq->cache_ptr = 0; + + cache_ptr = creq->len % SHA1_BLOCK_SIZE; + if (!cache_ptr) + return 0; + + ret = mv_cesa_ahash_alloc_cache(req); + if (ret) + return ret; + + memcpy(creq->cache, in_state->buffer, cache_ptr); + creq->cache_ptr = cache_ptr; + + return 0; +} + +static int mv_cesa_sha1_digest(struct ahash_request *req) +{ + int ret; + + ret = mv_cesa_sha1_init(req); + if (ret) + return ret; + + return mv_cesa_ahash_finup(req); +} + +struct ahash_alg mv_sha1_alg = { + .init = mv_cesa_sha1_init, + .update = mv_cesa_ahash_update, + .final = mv_cesa_ahash_final, + .finup = mv_cesa_ahash_finup, + .digest = mv_cesa_sha1_digest, + .export = mv_cesa_sha1_export, + .import = mv_cesa_sha1_import, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "sha1", + .cra_driver_name = "mv-sha1", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx), + .cra_init = mv_cesa_ahash_cra_init, + .cra_module = THIS_MODULE, + } + } +}; + +struct mv_cesa_ahash_result { + struct completion completion; + int error; +}; + +static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req, + int error) +{ + struct mv_cesa_ahash_result *result = req->data; + + if (error == -EINPROGRESS) + return; + + result->error = error; + complete(&result->completion); +} + +static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad, + void *state, unsigned int blocksize) +{ + struct mv_cesa_ahash_result result; + struct scatterlist sg; + int ret; + + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + mv_cesa_hmac_ahash_complete, &result); + sg_init_one(&sg, pad, blocksize); + ahash_request_set_crypt(req, &sg, pad, blocksize); + init_completion(&result.completion); + + ret = crypto_ahash_init(req); + if (ret) + return ret; + + ret = crypto_ahash_update(req); + if (ret && ret != -EINPROGRESS) + return ret; + + wait_for_completion_interruptible(&result.completion); + if (result.error) + return result.error; + + ret = crypto_ahash_export(req, state); + if (ret) + return ret; + + return 0; +} + +static int mv_cesa_ahmac_pad_init(struct ahash_request *req, + const u8 *key, unsigned int keylen, + u8 *ipad, u8 *opad, + unsigned int blocksize) +{ + struct mv_cesa_ahash_result result; + struct scatterlist sg; + int ret; + int i; + + if (keylen <= blocksize) { + memcpy(ipad, key, keylen); + } else { + u8 *keydup = kmemdup(key, keylen, GFP_KERNEL); + + if (!keydup) + return -ENOMEM; + + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + mv_cesa_hmac_ahash_complete, + &result); + sg_init_one(&sg, keydup, keylen); + ahash_request_set_crypt(req, &sg, ipad, keylen); + init_completion(&result.completion); + + ret = crypto_ahash_digest(req); + if (ret == -EINPROGRESS) { + wait_for_completion_interruptible(&result.completion); + ret 
= result.error; + } + + /* Set the memory region to 0 to avoid any leak. */ + memset(keydup, 0, keylen); + kfree(keydup); + + if (ret) + return ret; + + keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req)); + } + + memset(ipad + keylen, 0, blocksize - keylen); + memcpy(opad, ipad, blocksize); + + for (i = 0; i < blocksize; i++) { + ipad[i] ^= 0x36; + opad[i] ^= 0x5c; + } + + return 0; +} + +static int mv_cesa_ahmac_setkey(const char *hash_alg_name, + const u8 *key, unsigned int keylen, + void *istate, void *ostate) +{ + struct ahash_request *req; + struct crypto_ahash *tfm; + unsigned int blocksize; + u8 *ipad = NULL; + u8 *opad; + int ret; + + tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH, + CRYPTO_ALG_TYPE_AHASH_MASK); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + req = ahash_request_alloc(tfm, GFP_KERNEL); + if (!req) { + ret = -ENOMEM; + goto free_ahash; + } + + crypto_ahash_clear_flags(tfm, ~0); + + blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); + + ipad = kzalloc(2 * blocksize, GFP_KERNEL); + if (!ipad) { + ret = -ENOMEM; + goto free_req; + } + + opad = ipad + blocksize; + + ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize); + if (ret) + goto free_ipad; + + ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize); + if (ret) + goto free_ipad; + + ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize); + +free_ipad: + kfree(ipad); +free_req: + ahash_request_free(req); +free_ahash: + crypto_free_ahash(tfm); + + return ret; +} + +static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm) +{ + struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm); + + ctx->base.ops = &mv_cesa_ahash_req_ops; + + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct mv_cesa_ahash_req)); + return 0; +} + +static int mv_cesa_ahmac_sha1_init(struct ahash_request *req) +{ + struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1); + memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv)); + + mv_cesa_ahash_init(req, &tmpl); + + return 0; +} + +static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); + struct sha1_state istate, ostate; + int ret, i; + + ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(istate.state); i++) + ctx->iv[i] = be32_to_cpu(istate.state[i]); + + for (i = 0; i < ARRAY_SIZE(ostate.state); i++) + ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]); + + return 0; +} + +static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req) +{ + int ret; + + ret = mv_cesa_ahmac_sha1_init(req); + if (ret) + return ret; + + return mv_cesa_ahash_finup(req); +} + +struct ahash_alg mv_ahmac_sha1_alg = { + .init = mv_cesa_ahmac_sha1_init, + .update = mv_cesa_ahash_update, + .final = mv_cesa_ahash_final, + .finup = mv_cesa_ahash_finup, + .digest = mv_cesa_ahmac_sha1_digest, + .setkey = mv_cesa_ahmac_sha1_setkey, + .export = mv_cesa_sha1_export, + .import = mv_cesa_sha1_import, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct sha1_state), + .base = { + .cra_name = "hmac(sha1)", + .cra_driver_name = "mv-hmac-sha1", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx), + .cra_init = mv_cesa_ahmac_cra_init, + 
.cra_module = THIS_MODULE, + } + } +}; -- cgit v1.2.3 From db509a45339fd786de355b11db34ff7421488cb1 Mon Sep 17 00:00:00 2001 From: Boris BREZILLON Date: Thu, 18 Jun 2015 15:46:21 +0200 Subject: crypto: marvell/cesa - add TDMA support The CESA IP supports CPU offload through a dedicated DMA engine (TDMA) which can control the crypto block. When you use this mode, all the required data (operation metadata and payload data) are transferred using DMA, and the results are retrieved through DMA when possible (hash results are not retrieved through DMA yet), thus reducing the involvement of the CPU and providing better performances in most cases (for small requests, the cost of DMA preparation might exceed the performance gain). Note that some CESA IPs do not embed this dedicated DMA, hence the activation of this feature on a per platform basis. Signed-off-by: Boris Brezillon Signed-off-by: Arnaud Ebalard Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 1 + drivers/crypto/marvell/Makefile | 2 +- drivers/crypto/marvell/cesa.c | 68 +++++++ drivers/crypto/marvell/cesa.h | 229 ++++++++++++++++++++++ drivers/crypto/marvell/cipher.c | 179 ++++++++++++++++- drivers/crypto/marvell/hash.c | 414 +++++++++++++++++++++++++++++++++++++++- drivers/crypto/marvell/tdma.c | 224 ++++++++++++++++++++++ 7 files changed, 1101 insertions(+), 16 deletions(-) create mode 100644 drivers/crypto/marvell/tdma.c (limited to 'drivers') diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index dbb35bbefaf3..fd69aa04b475 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -184,6 +184,7 @@ config CRYPTO_DEV_MARVELL_CESA help This driver allows you to utilize the Cryptographic Engines and Security Accelerator (CESA) which can be found on the Armada 370. + This driver supports CPU offload through DMA transfers. This driver is aimed at replacing the mv_cesa driver. This will only happen once it has received proper testing. 
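For orientation before the code: the TDMA path added below works by chaining small descriptors (an operation context, data copies to and from the internal SRAM, and dummy descriptors that trigger or terminate the crypto operation) and then handing the first descriptor of the chain to the engine. The following is a condensed, editorial sketch of the intended call pattern, not part of the patch itself; it is modeled on mv_cesa_ablkcipher_dma_req_init() in the cipher.c hunk further down and uses only helpers introduced by this patch. The wrapper name example_build_cipher_chain() is illustrative, the scatterlists are assumed to be DMA mapped already, and error unwinding via mv_cesa_dma_cleanup() is left to the caller.

/*
 * Editorial sketch: build a TDMA chain for one cipher request, condensed
 * from mv_cesa_ablkcipher_dma_req_init() below.
 */
static int example_build_cipher_chain(struct ablkcipher_request *req,
				      const struct mv_cesa_op_ctx *tmpl,
				      struct mv_cesa_tdma_chain *chain,
				      gfp_t flags)
{
	struct mv_cesa_ablkcipher_dma_iter iter;
	bool skip_ctx = false;
	int ret;

	mv_cesa_tdma_desc_iter_init(chain);
	mv_cesa_ablkcipher_req_iter_init(&iter, req);

	do {
		struct mv_cesa_op_ctx *op;

		/* operation context, copied into SRAM by the TDMA engine */
		op = mv_cesa_dma_add_op(chain, tmpl, skip_ctx, flags);
		if (IS_ERR(op))
			return PTR_ERR(op);
		skip_ctx = true; /* key/IV context only needs copying once */

		mv_cesa_set_crypt_op_len(op, iter.base.op_len);

		/* input data: source scatterlist -> SRAM */
		ret = mv_cesa_dma_add_op_transfers(chain, &iter.base,
						   &iter.src, flags);
		if (ret)
			return ret;

		/* dummy descriptor that launches the crypto operation */
		ret = mv_cesa_dma_add_dummy_launch(chain, flags);
		if (ret)
			return ret;

		/* results: SRAM -> destination scatterlist */
		ret = mv_cesa_dma_add_op_transfers(chain, &iter.base,
						   &iter.dst, flags);
		if (ret)
			return ret;
	} while (mv_cesa_ablkcipher_req_iter_next_op(&iter));

	return 0;
}

When the request is later dequeued, mv_cesa_ablkcipher_step() calls mv_cesa_dma_step(), which writes chain.first->cur_dma to CESA_TDMA_NEXT_ADDR and lets the TDMA block feed the crypto engine with no further CPU involvement until the completion interrupt.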
diff --git a/drivers/crypto/marvell/Makefile b/drivers/crypto/marvell/Makefile index 68d0982c3416..0c12b13574dc 100644 --- a/drivers/crypto/marvell/Makefile +++ b/drivers/crypto/marvell/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell-cesa.o -marvell-cesa-objs := cesa.o cipher.o hash.o +marvell-cesa-objs := cesa.o cipher.o hash.o tdma.o diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c index 76a694303839..986f024d73fa 100644 --- a/drivers/crypto/marvell/cesa.c +++ b/drivers/crypto/marvell/cesa.c @@ -184,6 +184,7 @@ static const struct mv_cesa_caps armada_370_caps = { .ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs), .ahash_algs = armada_370_ahash_algs, .nahash_algs = ARRAY_SIZE(armada_370_ahash_algs), + .has_tdma = true, }; static const struct of_device_id mv_cesa_of_match_table[] = { @@ -192,6 +193,66 @@ static const struct of_device_id mv_cesa_of_match_table[] = { }; MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table); +static void +mv_cesa_conf_mbus_windows(struct mv_cesa_engine *engine, + const struct mbus_dram_target_info *dram) +{ + void __iomem *iobase = engine->regs; + int i; + + for (i = 0; i < 4; i++) { + writel(0, iobase + CESA_TDMA_WINDOW_CTRL(i)); + writel(0, iobase + CESA_TDMA_WINDOW_BASE(i)); + } + + for (i = 0; i < dram->num_cs; i++) { + const struct mbus_dram_window *cs = dram->cs + i; + + writel(((cs->size - 1) & 0xffff0000) | + (cs->mbus_attr << 8) | + (dram->mbus_dram_target_id << 4) | 1, + iobase + CESA_TDMA_WINDOW_CTRL(i)); + writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i)); + } +} + +static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa) +{ + struct device *dev = cesa->dev; + struct mv_cesa_dev_dma *dma; + + if (!cesa->caps->has_tdma) + return 0; + + dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL); + if (!dma) + return -ENOMEM; + + dma->tdma_desc_pool = dmam_pool_create("tdma_desc", dev, + sizeof(struct mv_cesa_tdma_desc), + 16, 0); + if (!dma->tdma_desc_pool) + return -ENOMEM; + + dma->op_pool = dmam_pool_create("cesa_op", dev, + sizeof(struct mv_cesa_op_ctx), 16, 0); + if (!dma->op_pool) + return -ENOMEM; + + dma->cache_pool = dmam_pool_create("cesa_cache", dev, + CESA_MAX_HASH_BLOCK_SIZE, 1, 0); + if (!dma->cache_pool) + return -ENOMEM; + + dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0); + if (!dma->cache_pool) + return -ENOMEM; + + cesa->dma = dma; + + return 0; +} + static int mv_cesa_get_sram(struct platform_device *pdev, int idx) { struct mv_cesa_dev *cesa = platform_get_drvdata(pdev); @@ -299,6 +360,10 @@ static int mv_cesa_probe(struct platform_device *pdev) if (IS_ERR(cesa->regs)) return -ENOMEM; + ret = mv_cesa_dev_dma_init(cesa); + if (ret) + return ret; + dram = mv_mbus_dram_info_nooverlap(); platform_set_drvdata(pdev, cesa); @@ -347,6 +412,9 @@ static int mv_cesa_probe(struct platform_device *pdev) engine->regs = cesa->regs + CESA_ENGINE_OFF(i); + if (dram && cesa->caps->has_tdma) + mv_cesa_conf_mbus_windows(&cesa->engines[i], dram); + writel(0, cesa->engines[i].regs + CESA_SA_INT_STATUS); writel(CESA_SA_CFG_STOP_DIG_ERR, cesa->engines[i].regs + CESA_SA_CFG); diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h index f68057c7d96a..f8faf260b6d7 100644 --- a/drivers/crypto/marvell/cesa.h +++ b/drivers/crypto/marvell/cesa.h @@ -6,6 +6,7 @@ #include #include +#include #define CESA_ENGINE_OFF(i) (((i) * 0x2000)) @@ -267,11 +268,94 @@ struct mv_cesa_op_ctx { } ctx; }; +/* TDMA descriptor flags */ +#define CESA_TDMA_DST_IN_SRAM BIT(31) +#define 
CESA_TDMA_SRC_IN_SRAM BIT(30) +#define CESA_TDMA_TYPE_MSK GENMASK(29, 0) +#define CESA_TDMA_DUMMY 0 +#define CESA_TDMA_DATA 1 +#define CESA_TDMA_OP 2 + +/** + * struct mv_cesa_tdma_desc - TDMA descriptor + * @byte_cnt: number of bytes to transfer + * @src: DMA address of the source + * @dst: DMA address of the destination + * @next_dma: DMA address of the next TDMA descriptor + * @cur_dma: DMA address of this TDMA descriptor + * @next: pointer to the next TDMA descriptor + * @op: CESA operation attached to this TDMA descriptor + * @data: raw data attached to this TDMA descriptor + * @flags: flags describing the TDMA transfer. See the + * "TDMA descriptor flags" section above + * + * TDMA descriptor used to create a transfer chain describing a crypto + * operation. + */ +struct mv_cesa_tdma_desc { + u32 byte_cnt; + u32 src; + u32 dst; + u32 next_dma; + u32 cur_dma; + struct mv_cesa_tdma_desc *next; + union { + struct mv_cesa_op_ctx *op; + void *data; + }; + u32 flags; +}; + +/** + * struct mv_cesa_sg_dma_iter - scatter-gather iterator + * @dir: transfer direction + * @sg: scatter list + * @offset: current position in the scatter list + * @op_offset: current position in the crypto operation + * + * Iterator used to iterate over a scatterlist while creating a TDMA chain for + * a crypto operation. + */ +struct mv_cesa_sg_dma_iter { + enum dma_data_direction dir; + struct scatterlist *sg; + unsigned int offset; + unsigned int op_offset; +}; + +/** + * struct mv_cesa_dma_iter - crypto operation iterator + * @len: the crypto operation length + * @offset: current position in the crypto operation + * @op_len: sub-operation length (the crypto engine can only act on 2kb + * chunks) + * + * Iterator used to create a TDMA chain for a given crypto operation. + */ +struct mv_cesa_dma_iter { + unsigned int len; + unsigned int offset; + unsigned int op_len; +}; + +/** + * struct mv_cesa_tdma_chain - TDMA chain + * @first: first entry in the TDMA chain + * @last: last entry in the TDMA chain + * + * Stores a TDMA chain for a specific crypto operation. + */ +struct mv_cesa_tdma_chain { + struct mv_cesa_tdma_desc *first; + struct mv_cesa_tdma_desc *last; +}; + struct mv_cesa_engine; /** * struct mv_cesa_caps - CESA device capabilities * @engines: number of engines + * @has_tdma: whether this device has a TDMA block * @cipher_algs: supported cipher algorithms * @ncipher_algs: number of supported cipher algorithms * @ahash_algs: supported hash algorithms @@ -281,12 +365,31 @@ struct mv_cesa_engine; */ struct mv_cesa_caps { int nengines; + bool has_tdma; struct crypto_alg **cipher_algs; int ncipher_algs; struct ahash_alg **ahash_algs; int nahash_algs; }; +/** + * struct mv_cesa_dev_dma - DMA pools + * @tdma_desc_pool: TDMA desc pool + * @op_pool: crypto operation pool + * @cache_pool: data cache pool (used by hash implementation when the + * hash request is smaller than the hash block size) + * @padding_pool: padding pool (used by hash implementation when hardware + * padding cannot be used) + * + * Structure containing the different DMA pools used by this driver. + */ +struct mv_cesa_dev_dma { + struct dma_pool *tdma_desc_pool; + struct dma_pool *op_pool; + struct dma_pool *cache_pool; + struct dma_pool *padding_pool; +}; + /** * struct mv_cesa_dev - CESA device * @caps: device capabilities @@ -295,6 +398,7 @@ struct mv_cesa_caps { * @lock: device lock * @queue: crypto request queue * @engines: array of engines + * @dma: dma pools * * Structure storing CESA device information. 
*/ @@ -306,6 +410,7 @@ struct mv_cesa_dev { spinlock_t lock; struct crypto_queue queue; struct mv_cesa_engine *engines; + struct mv_cesa_dev_dma *dma; }; /** @@ -391,9 +496,11 @@ struct mv_cesa_hmac_ctx { /** * enum mv_cesa_req_type - request type definitions * @CESA_STD_REQ: standard request + * @CESA_DMA_REQ: DMA request */ enum mv_cesa_req_type { CESA_STD_REQ, + CESA_DMA_REQ, }; /** @@ -406,6 +513,27 @@ struct mv_cesa_req { struct mv_cesa_engine *engine; }; +/** + * struct mv_cesa_tdma_req - CESA TDMA request + * @base: base information + * @chain: TDMA chain + */ +struct mv_cesa_tdma_req { + struct mv_cesa_req base; + struct mv_cesa_tdma_chain chain; +}; + +/** + * struct mv_cesa_sg_std_iter - CESA scatter-gather iterator for standard + * requests + * @iter: sg mapping iterator + * @offset: current offset in the SG entry mapped in memory + */ +struct mv_cesa_sg_std_iter { + struct sg_mapping_iter iter; + unsigned int offset; +}; + /** * struct mv_cesa_ablkcipher_std_req - cipher standard request * @base: base information @@ -430,6 +558,7 @@ struct mv_cesa_ablkcipher_std_req { struct mv_cesa_ablkcipher_req { union { struct mv_cesa_req base; + struct mv_cesa_tdma_req dma; struct mv_cesa_ablkcipher_std_req std; } req; int src_nents; @@ -446,6 +575,20 @@ struct mv_cesa_ahash_std_req { unsigned int offset; }; +/** + * struct mv_cesa_ahash_dma_req - DMA hash request + * @base: base information + * @padding: padding buffer + * @padding_dma: DMA address of the padding buffer + * @cache_dma: DMA address of the cache buffer + */ +struct mv_cesa_ahash_dma_req { + struct mv_cesa_tdma_req base; + u8 *padding; + dma_addr_t padding_dma; + dma_addr_t cache_dma; +}; + /** * struct mv_cesa_ahash_req - hash request * @req: type specific request information @@ -460,6 +603,7 @@ struct mv_cesa_ahash_std_req { struct mv_cesa_ahash_req { union { struct mv_cesa_req base; + struct mv_cesa_ahash_dma_req dma; struct mv_cesa_ahash_std_req std; } req; struct mv_cesa_op_ctx op_tmpl; @@ -543,6 +687,91 @@ static inline u32 mv_cesa_get_int_mask(struct mv_cesa_engine *engine) int mv_cesa_queue_req(struct crypto_async_request *req); +/* TDMA functions */ + +static inline void mv_cesa_req_dma_iter_init(struct mv_cesa_dma_iter *iter, + unsigned int len) +{ + iter->len = len; + iter->op_len = min(len, CESA_SA_SRAM_PAYLOAD_SIZE); + iter->offset = 0; +} + +static inline void mv_cesa_sg_dma_iter_init(struct mv_cesa_sg_dma_iter *iter, + struct scatterlist *sg, + enum dma_data_direction dir) +{ + iter->op_offset = 0; + iter->offset = 0; + iter->sg = sg; + iter->dir = dir; +} + +static inline unsigned int +mv_cesa_req_dma_iter_transfer_len(struct mv_cesa_dma_iter *iter, + struct mv_cesa_sg_dma_iter *sgiter) +{ + return min(iter->op_len - sgiter->op_offset, + sg_dma_len(sgiter->sg) - sgiter->offset); +} + +bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *chain, + struct mv_cesa_sg_dma_iter *sgiter, + unsigned int len); + +static inline bool mv_cesa_req_dma_iter_next_op(struct mv_cesa_dma_iter *iter) +{ + iter->offset += iter->op_len; + iter->op_len = min(iter->len - iter->offset, + CESA_SA_SRAM_PAYLOAD_SIZE); + + return iter->op_len; +} + +void mv_cesa_dma_step(struct mv_cesa_tdma_req *dreq); + +static inline int mv_cesa_dma_process(struct mv_cesa_tdma_req *dreq, + u32 status) +{ + if (!(status & CESA_SA_INT_ACC0_IDMA_DONE)) + return -EINPROGRESS; + + if (status & CESA_SA_INT_IDMA_OWN_ERR) + return -EINVAL; + + return 0; +} + +void mv_cesa_dma_prepare(struct mv_cesa_tdma_req *dreq, + struct mv_cesa_engine *engine); 
+ +void mv_cesa_dma_cleanup(struct mv_cesa_tdma_req *dreq); + +static inline void +mv_cesa_tdma_desc_iter_init(struct mv_cesa_tdma_chain *chain) +{ + memset(chain, 0, sizeof(*chain)); +} + +struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain, + const struct mv_cesa_op_ctx *op_templ, + bool skip_ctx, + gfp_t flags); + +int mv_cesa_dma_add_data_transfer(struct mv_cesa_tdma_chain *chain, + dma_addr_t dst, dma_addr_t src, u32 size, + u32 flags, gfp_t gfp_flags); + +int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, + u32 flags); + +int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, u32 flags); + +int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain, + struct mv_cesa_dma_iter *dma_iter, + struct mv_cesa_sg_dma_iter *sgiter, + gfp_t gfp_flags); + /* Algorithm definitions */ extern struct ahash_alg mv_sha1_alg; diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c index e6eea488f11e..e21783b83046 100644 --- a/drivers/crypto/marvell/cipher.c +++ b/drivers/crypto/marvell/cipher.c @@ -21,6 +21,55 @@ struct mv_cesa_aes_ctx { struct crypto_aes_ctx aes; }; +struct mv_cesa_ablkcipher_dma_iter { + struct mv_cesa_dma_iter base; + struct mv_cesa_sg_dma_iter src; + struct mv_cesa_sg_dma_iter dst; +}; + +static inline void +mv_cesa_ablkcipher_req_iter_init(struct mv_cesa_ablkcipher_dma_iter *iter, + struct ablkcipher_request *req) +{ + mv_cesa_req_dma_iter_init(&iter->base, req->nbytes); + mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE); + mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE); +} + +static inline bool +mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter) +{ + iter->src.op_offset = 0; + iter->dst.op_offset = 0; + + return mv_cesa_req_dma_iter_next_op(&iter->base); +} + +static inline void +mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req) +{ + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); + + if (req->dst != req->src) { + dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents, + DMA_FROM_DEVICE); + dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, + DMA_TO_DEVICE); + } else { + dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, + DMA_BIDIRECTIONAL); + } + mv_cesa_dma_cleanup(&creq->req.dma); +} + +static inline void mv_cesa_ablkcipher_cleanup(struct ablkcipher_request *req) +{ + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); + + if (creq->req.base.type == CESA_DMA_REQ) + mv_cesa_ablkcipher_dma_cleanup(req); +} + static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req) { struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); @@ -77,7 +126,11 @@ static int mv_cesa_ablkcipher_process(struct crypto_async_request *req, struct mv_cesa_engine *engine = sreq->base.engine; int ret; - ret = mv_cesa_ablkcipher_std_process(ablkreq, status); + if (creq->req.base.type == CESA_DMA_REQ) + ret = mv_cesa_dma_process(&creq->req.dma, status); + else + ret = mv_cesa_ablkcipher_std_process(ablkreq, status); + if (ret) return ret; @@ -90,8 +143,21 @@ static int mv_cesa_ablkcipher_process(struct crypto_async_request *req, static void mv_cesa_ablkcipher_step(struct crypto_async_request *req) { struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); - mv_cesa_ablkcipher_std_step(ablkreq); + if (creq->req.base.type == CESA_DMA_REQ) + mv_cesa_dma_step(&creq->req.dma); + else + mv_cesa_ablkcipher_std_step(ablkreq); 
+} + +static inline void +mv_cesa_ablkcipher_dma_prepare(struct ablkcipher_request *req) +{ + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); + struct mv_cesa_tdma_req *dreq = &creq->req.dma; + + mv_cesa_dma_prepare(dreq, dreq->base.engine); } static inline void @@ -115,12 +181,18 @@ static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req, creq->req.base.engine = engine; - mv_cesa_ablkcipher_std_prepare(ablkreq); + if (creq->req.base.type == CESA_DMA_REQ) + mv_cesa_ablkcipher_dma_prepare(ablkreq); + else + mv_cesa_ablkcipher_std_prepare(ablkreq); } static inline void mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req) { + struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); + + mv_cesa_ablkcipher_cleanup(ablkreq); } static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = { @@ -166,6 +238,92 @@ static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, return 0; } +static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req, + const struct mv_cesa_op_ctx *op_templ) +{ + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC; + struct mv_cesa_tdma_req *dreq = &creq->req.dma; + struct mv_cesa_ablkcipher_dma_iter iter; + struct mv_cesa_tdma_chain chain; + bool skip_ctx = false; + int ret; + + dreq->base.type = CESA_DMA_REQ; + dreq->chain.first = NULL; + dreq->chain.last = NULL; + + if (req->src != req->dst) { + ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents, + DMA_TO_DEVICE); + if (!ret) + return -ENOMEM; + + ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents, + DMA_FROM_DEVICE); + if (!ret) { + ret = -ENOMEM; + goto err_unmap_src; + } + } else { + ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents, + DMA_BIDIRECTIONAL); + if (!ret) + return -ENOMEM; + } + + mv_cesa_tdma_desc_iter_init(&chain); + mv_cesa_ablkcipher_req_iter_init(&iter, req); + + do { + struct mv_cesa_op_ctx *op; + + op = mv_cesa_dma_add_op(&chain, op_templ, skip_ctx, flags); + if (IS_ERR(op)) { + ret = PTR_ERR(op); + goto err_free_tdma; + } + skip_ctx = true; + + mv_cesa_set_crypt_op_len(op, iter.base.op_len); + + /* Add input transfers */ + ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base, + &iter.src, flags); + if (ret) + goto err_free_tdma; + + /* Add dummy desc to launch the crypto operation */ + ret = mv_cesa_dma_add_dummy_launch(&chain, flags); + if (ret) + goto err_free_tdma; + + /* Add output transfers */ + ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base, + &iter.dst, flags); + if (ret) + goto err_free_tdma; + + } while (mv_cesa_ablkcipher_req_iter_next_op(&iter)); + + dreq->chain = chain; + + return 0; + +err_free_tdma: + mv_cesa_dma_cleanup(dreq); + if (req->dst != req->src) + dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents, + DMA_FROM_DEVICE); + +err_unmap_src: + dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, + req->dst != req->src ? 
DMA_TO_DEVICE : DMA_BIDIRECTIONAL); + + return ret; +} + static inline int mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req, const struct mv_cesa_op_ctx *op_templ) @@ -186,6 +344,7 @@ static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req, struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); unsigned int blksize = crypto_ablkcipher_blocksize(tfm); + int ret; if (!IS_ALIGNED(req->nbytes, blksize)) return -EINVAL; @@ -196,7 +355,13 @@ static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req, mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY, CESA_SA_DESC_CFG_OP_MSK); - return mv_cesa_ablkcipher_std_req_init(req, tmpl); + /* TODO: add a threshold for DMA usage */ + if (cesa_dev->caps->has_tdma) + ret = mv_cesa_ablkcipher_dma_req_init(req, tmpl); + else + ret = mv_cesa_ablkcipher_std_req_init(req, tmpl); + + return ret; } static int mv_cesa_aes_op(struct ablkcipher_request *req, @@ -230,7 +395,11 @@ static int mv_cesa_aes_op(struct ablkcipher_request *req, if (ret) return ret; - return mv_cesa_queue_req(&req->base); + ret = mv_cesa_queue_req(&req->base); + if (ret && ret != -EINPROGRESS) + mv_cesa_ablkcipher_cleanup(req); + + return ret; } static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req) diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c index 2d33f6816c31..528da260a7cc 100644 --- a/drivers/crypto/marvell/hash.c +++ b/drivers/crypto/marvell/hash.c @@ -16,6 +16,47 @@ #include "cesa.h" +struct mv_cesa_ahash_dma_iter { + struct mv_cesa_dma_iter base; + struct mv_cesa_sg_dma_iter src; +}; + +static inline void +mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter, + struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + unsigned int len = req->nbytes; + + if (!creq->last_req) + len = (len + creq->cache_ptr) & ~CESA_HASH_BLOCK_SIZE_MSK; + + mv_cesa_req_dma_iter_init(&iter->base, len); + mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE); + iter->src.op_offset = creq->cache_ptr; +} + +static inline bool +mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter) +{ + iter->src.op_offset = 0; + + return mv_cesa_req_dma_iter_next_op(&iter->base); +} + +static inline int mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_req *creq, + gfp_t flags) +{ + struct mv_cesa_ahash_dma_req *dreq = &creq->req.dma; + + creq->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags, + &dreq->cache_dma); + if (!creq->cache) + return -ENOMEM; + + return 0; +} + static inline int mv_cesa_ahash_std_alloc_cache(struct mv_cesa_ahash_req *creq, gfp_t flags) { @@ -31,11 +72,23 @@ static int mv_cesa_ahash_alloc_cache(struct ahash_request *req) struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
GFP_KERNEL : GFP_ATOMIC; + int ret; if (creq->cache) return 0; - return mv_cesa_ahash_std_alloc_cache(creq, flags); + if (creq->req.base.type == CESA_DMA_REQ) + ret = mv_cesa_ahash_dma_alloc_cache(creq, flags); + else + ret = mv_cesa_ahash_std_alloc_cache(creq, flags); + + return ret; +} + +static inline void mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_req *creq) +{ + dma_pool_free(cesa_dev->dma->cache_pool, creq->cache, + creq->req.dma.cache_dma); } static inline void mv_cesa_ahash_std_free_cache(struct mv_cesa_ahash_req *creq) @@ -48,16 +101,69 @@ static void mv_cesa_ahash_free_cache(struct mv_cesa_ahash_req *creq) if (!creq->cache) return; - mv_cesa_ahash_std_free_cache(creq); + if (creq->req.base.type == CESA_DMA_REQ) + mv_cesa_ahash_dma_free_cache(creq); + else + mv_cesa_ahash_std_free_cache(creq); creq->cache = NULL; } +static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req, + gfp_t flags) +{ + if (req->padding) + return 0; + + req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags, + &req->padding_dma); + if (!req->padding) + return -ENOMEM; + + return 0; +} + +static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req) +{ + if (!req->padding) + return; + + dma_pool_free(cesa_dev->dma->padding_pool, req->padding, + req->padding_dma); + req->padding = NULL; +} + +static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + + mv_cesa_ahash_dma_free_padding(&creq->req.dma); +} + +static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + + dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE); + mv_cesa_dma_cleanup(&creq->req.dma.base); +} + +static inline void mv_cesa_ahash_cleanup(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + + if (creq->req.base.type == CESA_DMA_REQ) + mv_cesa_ahash_dma_cleanup(req); +} + static void mv_cesa_ahash_last_cleanup(struct ahash_request *req) { struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); mv_cesa_ahash_free_cache(creq); + + if (creq->req.base.type == CESA_DMA_REQ) + mv_cesa_ahash_dma_last_cleanup(req); } static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq) @@ -183,6 +289,14 @@ static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status) return 0; } +static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + struct mv_cesa_tdma_req *dreq = &creq->req.dma.base; + + mv_cesa_dma_prepare(dreq, dreq->base.engine); +} + static void mv_cesa_ahash_std_prepare(struct ahash_request *req) { struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); @@ -197,8 +311,12 @@ static void mv_cesa_ahash_std_prepare(struct ahash_request *req) static void mv_cesa_ahash_step(struct crypto_async_request *req) { struct ahash_request *ahashreq = ahash_request_cast(req); + struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq); - mv_cesa_ahash_std_step(ahashreq); + if (creq->req.base.type == CESA_DMA_REQ) + mv_cesa_dma_step(&creq->req.dma.base); + else + mv_cesa_ahash_std_step(ahashreq); } static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status) @@ -209,7 +327,11 @@ static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status) unsigned int digsize; int ret, i; - ret = mv_cesa_ahash_std_process(ahashreq, status); + if (creq->req.base.type == CESA_DMA_REQ) + 
ret = mv_cesa_dma_process(&creq->req.dma.base, status); + else + ret = mv_cesa_ahash_std_process(ahashreq, status); + if (ret == -EINPROGRESS) return ret; @@ -243,7 +365,10 @@ static void mv_cesa_ahash_prepare(struct crypto_async_request *req, creq->req.base.engine = engine; - mv_cesa_ahash_std_prepare(ahashreq); + if (creq->req.base.type == CESA_DMA_REQ) + mv_cesa_ahash_dma_prepare(ahashreq); + else + mv_cesa_ahash_std_prepare(ahashreq); digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq)); for (i = 0; i < digsize / 4; i++) @@ -258,6 +383,8 @@ static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req) if (creq->last_req) mv_cesa_ahash_last_cleanup(ahashreq); + + mv_cesa_ahash_cleanup(ahashreq); } static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = { @@ -325,14 +452,267 @@ static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached) return 0; } +static struct mv_cesa_op_ctx * +mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain, + struct mv_cesa_ahash_dma_iter *dma_iter, + struct mv_cesa_ahash_req *creq, + gfp_t flags) +{ + struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma; + struct mv_cesa_op_ctx *op = NULL; + int ret; + + if (!creq->cache_ptr) + return NULL; + + ret = mv_cesa_dma_add_data_transfer(chain, + CESA_SA_DATA_SRAM_OFFSET, + ahashdreq->cache_dma, + creq->cache_ptr, + CESA_TDMA_DST_IN_SRAM, + flags); + if (ret) + return ERR_PTR(ret); + + if (!dma_iter->base.op_len) { + op = mv_cesa_dma_add_op(chain, &creq->op_tmpl, false, flags); + if (IS_ERR(op)) + return op; + + mv_cesa_set_mac_op_frag_len(op, creq->cache_ptr); + + /* Add dummy desc to launch crypto operation */ + ret = mv_cesa_dma_add_dummy_launch(chain, flags); + if (ret) + return ERR_PTR(ret); + } + + return op; +} + +static struct mv_cesa_op_ctx * +mv_cesa_ahash_dma_add_data(struct mv_cesa_tdma_chain *chain, + struct mv_cesa_ahash_dma_iter *dma_iter, + struct mv_cesa_ahash_req *creq, + gfp_t flags) +{ + struct mv_cesa_op_ctx *op; + int ret; + + op = mv_cesa_dma_add_op(chain, &creq->op_tmpl, false, flags); + if (IS_ERR(op)) + return op; + + mv_cesa_set_mac_op_frag_len(op, dma_iter->base.op_len); + + if ((mv_cesa_get_op_cfg(&creq->op_tmpl) & CESA_SA_DESC_CFG_FRAG_MSK) == + CESA_SA_DESC_CFG_FIRST_FRAG) + mv_cesa_update_op_cfg(&creq->op_tmpl, + CESA_SA_DESC_CFG_MID_FRAG, + CESA_SA_DESC_CFG_FRAG_MSK); + + /* Add input transfers */ + ret = mv_cesa_dma_add_op_transfers(chain, &dma_iter->base, + &dma_iter->src, flags); + if (ret) + return ERR_PTR(ret); + + /* Add dummy desc to launch crypto operation */ + ret = mv_cesa_dma_add_dummy_launch(chain, flags); + if (ret) + return ERR_PTR(ret); + + return op; +} + +static struct mv_cesa_op_ctx * +mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain, + struct mv_cesa_ahash_dma_iter *dma_iter, + struct mv_cesa_ahash_req *creq, + struct mv_cesa_op_ctx *op, + gfp_t flags) +{ + struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma; + unsigned int len, trailerlen, padoff = 0; + int ret; + + if (!creq->last_req) + return op; + + if (op && creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) { + u32 frag = CESA_SA_DESC_CFG_NOT_FRAG; + + if ((mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK) != + CESA_SA_DESC_CFG_FIRST_FRAG) + frag = CESA_SA_DESC_CFG_LAST_FRAG; + + mv_cesa_update_op_cfg(op, frag, CESA_SA_DESC_CFG_FRAG_MSK); + + return op; + } + + ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags); + if (ret) + return ERR_PTR(ret); + + trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding); + + if (op) { + len = 
min(CESA_SA_SRAM_PAYLOAD_SIZE - dma_iter->base.op_len, + trailerlen); + if (len) { + ret = mv_cesa_dma_add_data_transfer(chain, + CESA_SA_DATA_SRAM_OFFSET + + dma_iter->base.op_len, + ahashdreq->padding_dma, + len, CESA_TDMA_DST_IN_SRAM, + flags); + if (ret) + return ERR_PTR(ret); + + mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG, + CESA_SA_DESC_CFG_FRAG_MSK); + mv_cesa_set_mac_op_frag_len(op, + dma_iter->base.op_len + len); + padoff += len; + } + } + + if (padoff >= trailerlen) + return op; + + if ((mv_cesa_get_op_cfg(&creq->op_tmpl) & CESA_SA_DESC_CFG_FRAG_MSK) != + CESA_SA_DESC_CFG_FIRST_FRAG) + mv_cesa_update_op_cfg(&creq->op_tmpl, + CESA_SA_DESC_CFG_MID_FRAG, + CESA_SA_DESC_CFG_FRAG_MSK); + + op = mv_cesa_dma_add_op(chain, &creq->op_tmpl, false, flags); + if (IS_ERR(op)) + return op; + + mv_cesa_set_mac_op_frag_len(op, trailerlen - padoff); + + ret = mv_cesa_dma_add_data_transfer(chain, + CESA_SA_DATA_SRAM_OFFSET, + ahashdreq->padding_dma + + padoff, + trailerlen - padoff, + CESA_TDMA_DST_IN_SRAM, + flags); + if (ret) + return ERR_PTR(ret); + + /* Add dummy desc to launch crypto operation */ + ret = mv_cesa_dma_add_dummy_launch(chain, flags); + if (ret) + return ERR_PTR(ret); + + return op; +} + +static int mv_cesa_ahash_dma_req_init(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC; + struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma; + struct mv_cesa_tdma_req *dreq = &ahashdreq->base; + struct mv_cesa_tdma_chain chain; + struct mv_cesa_ahash_dma_iter iter; + struct mv_cesa_op_ctx *op = NULL; + int ret; + + dreq->chain.first = NULL; + dreq->chain.last = NULL; + + if (creq->src_nents) { + ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents, + DMA_TO_DEVICE); + if (!ret) { + ret = -ENOMEM; + goto err; + } + } + + mv_cesa_tdma_desc_iter_init(&chain); + mv_cesa_ahash_req_iter_init(&iter, req); + + op = mv_cesa_ahash_dma_add_cache(&chain, &iter, + creq, flags); + if (IS_ERR(op)) { + ret = PTR_ERR(op); + goto err_free_tdma; + } + + do { + if (!iter.base.op_len) + break; + + op = mv_cesa_ahash_dma_add_data(&chain, &iter, + creq, flags); + if (IS_ERR(op)) { + ret = PTR_ERR(op); + goto err_free_tdma; + } + } while (mv_cesa_ahash_req_iter_next_op(&iter)); + + op = mv_cesa_ahash_dma_last_req(&chain, &iter, creq, op, flags); + if (IS_ERR(op)) { + ret = PTR_ERR(op); + goto err_free_tdma; + } + + if (op) { + /* Add dummy desc to wait for crypto operation end */ + ret = mv_cesa_dma_add_dummy_end(&chain, flags); + if (ret) + goto err_free_tdma; + } + + if (!creq->last_req) + creq->cache_ptr = req->nbytes + creq->cache_ptr - + iter.base.len; + else + creq->cache_ptr = 0; + + dreq->chain = chain; + + return 0; + +err_free_tdma: + mv_cesa_dma_cleanup(dreq); + dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE); + +err: + mv_cesa_ahash_last_cleanup(req); + + return ret; +} + static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached) { struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + int ret; + + if (cesa_dev->caps->has_tdma) + creq->req.base.type = CESA_DMA_REQ; + else + creq->req.base.type = CESA_STD_REQ; - creq->req.base.type = CESA_STD_REQ; creq->src_nents = sg_nents_for_len(req->src, req->nbytes); - return mv_cesa_ahash_cache_req(req, cached); + ret = mv_cesa_ahash_cache_req(req, cached); + if (ret) + return ret; + + if (*cached) + return 0; + + if (creq->req.base.type == CESA_DMA_REQ) + ret = 
mv_cesa_ahash_dma_req_init(req); + + return ret; } static int mv_cesa_ahash_update(struct ahash_request *req) @@ -349,7 +729,13 @@ static int mv_cesa_ahash_update(struct ahash_request *req) if (cached) return 0; - return mv_cesa_queue_req(&req->base); + ret = mv_cesa_queue_req(&req->base); + if (ret && ret != -EINPROGRESS) { + mv_cesa_ahash_cleanup(req); + return ret; + } + + return ret; } static int mv_cesa_ahash_final(struct ahash_request *req) @@ -370,7 +756,11 @@ static int mv_cesa_ahash_final(struct ahash_request *req) if (cached) return 0; - return mv_cesa_queue_req(&req->base); + ret = mv_cesa_queue_req(&req->base); + if (ret && ret != -EINPROGRESS) + mv_cesa_ahash_cleanup(req); + + return ret; } static int mv_cesa_ahash_finup(struct ahash_request *req) @@ -391,7 +781,11 @@ static int mv_cesa_ahash_finup(struct ahash_request *req) if (cached) return 0; - return mv_cesa_queue_req(&req->base); + ret = mv_cesa_queue_req(&req->base); + if (ret && ret != -EINPROGRESS) + mv_cesa_ahash_cleanup(req); + + return ret; } static int mv_cesa_sha1_init(struct ahash_request *req) diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c new file mode 100644 index 000000000000..64a366c50174 --- /dev/null +++ b/drivers/crypto/marvell/tdma.c @@ -0,0 +1,224 @@ +/* + * Provide TDMA helper functions used by cipher and hash algorithm + * implementations. + * + * Author: Boris Brezillon + * Author: Arnaud Ebalard + * + * This work is based on an initial version written by + * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc > + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#include "cesa.h" + +bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *iter, + struct mv_cesa_sg_dma_iter *sgiter, + unsigned int len) +{ + if (!sgiter->sg) + return false; + + sgiter->op_offset += len; + sgiter->offset += len; + if (sgiter->offset == sg_dma_len(sgiter->sg)) { + if (sg_is_last(sgiter->sg)) + return false; + sgiter->offset = 0; + sgiter->sg = sg_next(sgiter->sg); + } + + if (sgiter->op_offset == iter->op_len) + return false; + + return true; +} + +void mv_cesa_dma_step(struct mv_cesa_tdma_req *dreq) +{ + struct mv_cesa_engine *engine = dreq->base.engine; + + writel(0, engine->regs + CESA_SA_CFG); + + mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE); + writel(CESA_TDMA_DST_BURST_128B | CESA_TDMA_SRC_BURST_128B | + CESA_TDMA_NO_BYTE_SWAP | CESA_TDMA_EN, + engine->regs + CESA_TDMA_CONTROL); + + writel(CESA_SA_CFG_ACT_CH0_IDMA | CESA_SA_CFG_MULTI_PKT | + CESA_SA_CFG_CH0_W_IDMA | CESA_SA_CFG_PARA_DIS, + engine->regs + CESA_SA_CFG); + writel(dreq->chain.first->cur_dma, + engine->regs + CESA_TDMA_NEXT_ADDR); + writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD); +} + +void mv_cesa_dma_cleanup(struct mv_cesa_tdma_req *dreq) +{ + struct mv_cesa_tdma_desc *tdma; + + for (tdma = dreq->chain.first; tdma;) { + struct mv_cesa_tdma_desc *old_tdma = tdma; + + if (tdma->flags & CESA_TDMA_OP) + dma_pool_free(cesa_dev->dma->op_pool, tdma->op, + le32_to_cpu(tdma->src)); + + tdma = tdma->next; + dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma, + le32_to_cpu(old_tdma->cur_dma)); + } + + dreq->chain.first = NULL; + dreq->chain.last = NULL; +} + +void mv_cesa_dma_prepare(struct mv_cesa_tdma_req *dreq, + struct mv_cesa_engine *engine) +{ + struct mv_cesa_tdma_desc *tdma; + + for (tdma = dreq->chain.first; tdma; tdma = 
tdma->next) { + if (tdma->flags & CESA_TDMA_DST_IN_SRAM) + tdma->dst = cpu_to_le32(tdma->dst + engine->sram_dma); + + if (tdma->flags & CESA_TDMA_SRC_IN_SRAM) + tdma->src = cpu_to_le32(tdma->src + engine->sram_dma); + + if (tdma->flags & CESA_TDMA_OP) + mv_cesa_adjust_op(engine, tdma->op); + } +} + +static struct mv_cesa_tdma_desc * +mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags) +{ + struct mv_cesa_tdma_desc *new_tdma = NULL; + dma_addr_t dma_handle; + + new_tdma = dma_pool_alloc(cesa_dev->dma->tdma_desc_pool, flags, + &dma_handle); + if (!new_tdma) + return ERR_PTR(-ENOMEM); + + memset(new_tdma, 0, sizeof(*new_tdma)); + new_tdma->cur_dma = cpu_to_le32(dma_handle); + if (chain->last) { + chain->last->next_dma = new_tdma->cur_dma; + chain->last->next = new_tdma; + } else { + chain->first = new_tdma; + } + + chain->last = new_tdma; + + return new_tdma; +} + +struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain, + const struct mv_cesa_op_ctx *op_templ, + bool skip_ctx, + gfp_t flags) +{ + struct mv_cesa_tdma_desc *tdma; + struct mv_cesa_op_ctx *op; + dma_addr_t dma_handle; + + tdma = mv_cesa_dma_add_desc(chain, flags); + if (IS_ERR(tdma)) + return ERR_CAST(tdma); + + op = dma_pool_alloc(cesa_dev->dma->op_pool, flags, &dma_handle); + if (!op) + return ERR_PTR(-ENOMEM); + + *op = *op_templ; + + tdma = chain->last; + tdma->op = op; + tdma->byte_cnt = (skip_ctx ? sizeof(op->desc) : sizeof(*op)) | BIT(31); + tdma->src = dma_handle; + tdma->flags = CESA_TDMA_DST_IN_SRAM | CESA_TDMA_OP; + + return op; +} + +int mv_cesa_dma_add_data_transfer(struct mv_cesa_tdma_chain *chain, + dma_addr_t dst, dma_addr_t src, u32 size, + u32 flags, gfp_t gfp_flags) +{ + struct mv_cesa_tdma_desc *tdma; + + tdma = mv_cesa_dma_add_desc(chain, gfp_flags); + if (IS_ERR(tdma)) + return PTR_ERR(tdma); + + tdma->byte_cnt = size | BIT(31); + tdma->src = src; + tdma->dst = dst; + + flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM); + tdma->flags = flags | CESA_TDMA_DATA; + + return 0; +} + +int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, + u32 flags) +{ + struct mv_cesa_tdma_desc *tdma; + + tdma = mv_cesa_dma_add_desc(chain, flags); + if (IS_ERR(tdma)) + return PTR_ERR(tdma); + + return 0; +} + +int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, u32 flags) +{ + struct mv_cesa_tdma_desc *tdma; + + tdma = mv_cesa_dma_add_desc(chain, flags); + if (IS_ERR(tdma)) + return PTR_ERR(tdma); + + tdma->byte_cnt = BIT(31); + + return 0; +} + +int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain, + struct mv_cesa_dma_iter *dma_iter, + struct mv_cesa_sg_dma_iter *sgiter, + gfp_t gfp_flags) +{ + u32 flags = sgiter->dir == DMA_TO_DEVICE ? 
+ CESA_TDMA_DST_IN_SRAM : CESA_TDMA_SRC_IN_SRAM; + unsigned int len; + + do { + dma_addr_t dst, src; + int ret; + + len = mv_cesa_req_dma_iter_transfer_len(dma_iter, sgiter); + if (sgiter->dir == DMA_TO_DEVICE) { + dst = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset; + src = sg_dma_address(sgiter->sg) + sgiter->offset; + } else { + dst = sg_dma_address(sgiter->sg) + sgiter->offset; + src = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset; + } + + ret = mv_cesa_dma_add_data_transfer(chain, dst, src, len, + flags, gfp_flags); + if (ret) + return ret; + + } while (mv_cesa_req_dma_iter_next_transfer(dma_iter, sgiter, len)); + + return 0; +} -- cgit v1.2.3 From 7b3aaaa095b437bfcb4e4c761a19f50294659f3a Mon Sep 17 00:00:00 2001 From: Boris BREZILLON Date: Thu, 18 Jun 2015 15:46:22 +0200 Subject: crypto: marvell/cesa - add DES support Add support for DES operations. Signed-off-by: Boris Brezillon Signed-off-by: Arnaud Ebalard Signed-off-by: Herbert Xu --- drivers/crypto/marvell/cesa.c | 2 + drivers/crypto/marvell/cesa.h | 2 + drivers/crypto/marvell/cipher.c | 150 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 154 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c index 986f024d73fa..212840e346d1 100644 --- a/drivers/crypto/marvell/cesa.c +++ b/drivers/crypto/marvell/cesa.c @@ -169,6 +169,8 @@ static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa) } static struct crypto_alg *armada_370_cipher_algs[] = { + &mv_cesa_ecb_des_alg, + &mv_cesa_cbc_des_alg, &mv_cesa_ecb_aes_alg, &mv_cesa_cbc_aes_alg, }; diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h index f8faf260b6d7..4b07209526bd 100644 --- a/drivers/crypto/marvell/cesa.h +++ b/drivers/crypto/marvell/cesa.h @@ -777,6 +777,8 @@ int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain, extern struct ahash_alg mv_sha1_alg; extern struct ahash_alg mv_ahmac_sha1_alg; +extern struct crypto_alg mv_cesa_ecb_des_alg; +extern struct crypto_alg mv_cesa_cbc_des_alg; extern struct crypto_alg mv_cesa_ecb_aes_alg; extern struct crypto_alg mv_cesa_cbc_aes_alg; diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c index e21783b83046..e420ea6c0ec4 100644 --- a/drivers/crypto/marvell/cipher.c +++ b/drivers/crypto/marvell/cipher.c @@ -13,9 +13,15 @@ */ #include +#include #include "cesa.h" +struct mv_cesa_des_ctx { + struct mv_cesa_ctx base; + u8 key[DES_KEY_SIZE]; +}; + struct mv_cesa_aes_ctx { struct mv_cesa_ctx base; struct crypto_aes_ctx aes; @@ -238,6 +244,30 @@ static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, return 0; } +static int mv_cesa_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, + unsigned int len) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm); + u32 tmp[DES_EXPKEY_WORDS]; + int ret; + + if (len != DES_KEY_SIZE) { + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + ret = des_ekey(tmp, key); + if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; + return -EINVAL; + } + + memcpy(ctx->key, key, DES_KEY_SIZE); + + return 0; +} + static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req, const struct mv_cesa_op_ctx *op_templ) { @@ -364,6 +394,126 @@ static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req, return ret; } +static int mv_cesa_des_op(struct ablkcipher_request *req, + struct mv_cesa_op_ctx *tmpl) 
+{ + struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + int ret; + + mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES, + CESA_SA_DESC_CFG_CRYPTM_MSK); + + memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE); + + ret = mv_cesa_ablkcipher_req_init(req, tmpl); + if (ret) + return ret; + + ret = mv_cesa_queue_req(&req->base); + if (ret && ret != -EINPROGRESS) + mv_cesa_ablkcipher_cleanup(req); + + return ret; +} + +static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, + CESA_SA_DESC_CFG_CRYPTCM_ECB | + CESA_SA_DESC_CFG_DIR_ENC); + + return mv_cesa_des_op(req, &tmpl); +} + +static int mv_cesa_ecb_des_decrypt(struct ablkcipher_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, + CESA_SA_DESC_CFG_CRYPTCM_ECB | + CESA_SA_DESC_CFG_DIR_DEC); + + return mv_cesa_des_op(req, &tmpl); +} + +struct crypto_alg mv_cesa_ecb_des_alg = { + .cra_name = "ecb(des)", + .cra_driver_name = "mv-ecb-des", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_cesa_des_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = mv_cesa_ablkcipher_cra_init, + .cra_u = { + .ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = mv_cesa_des_setkey, + .encrypt = mv_cesa_ecb_des_encrypt, + .decrypt = mv_cesa_ecb_des_decrypt, + }, + }, +}; + +static int mv_cesa_cbc_des_op(struct ablkcipher_request *req, + struct mv_cesa_op_ctx *tmpl) +{ + mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC, + CESA_SA_DESC_CFG_CRYPTCM_MSK); + + memcpy(tmpl->ctx.blkcipher.iv, req->info, DES_BLOCK_SIZE); + + return mv_cesa_des_op(req, tmpl); +} + +static int mv_cesa_cbc_des_encrypt(struct ablkcipher_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC); + + return mv_cesa_cbc_des_op(req, &tmpl); +} + +static int mv_cesa_cbc_des_decrypt(struct ablkcipher_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC); + + return mv_cesa_cbc_des_op(req, &tmpl); +} + +struct crypto_alg mv_cesa_cbc_des_alg = { + .cra_name = "cbc(des)", + .cra_driver_name = "mv-cbc-des", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_cesa_des_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = mv_cesa_ablkcipher_cra_init, + .cra_u = { + .ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = mv_cesa_des_setkey, + .encrypt = mv_cesa_cbc_des_encrypt, + .decrypt = mv_cesa_cbc_des_decrypt, + }, + }, +}; + static int mv_cesa_aes_op(struct ablkcipher_request *req, struct mv_cesa_op_ctx *tmpl) { -- cgit v1.2.3 From 4ada483978237068bf21c0dc318af676145bfed5 Mon Sep 17 00:00:00 2001 From: Arnaud Ebalard Date: Thu, 18 Jun 2015 15:46:23 +0200 Subject: crypto: marvell/cesa - add Triple-DES support Add support for Triple-DES operations. 
Signed-off-by: Arnaud Ebalard Signed-off-by: Boris Brezillon Signed-off-by: Herbert Xu --- drivers/crypto/marvell/cesa.c | 2 + drivers/crypto/marvell/cesa.h | 2 + drivers/crypto/marvell/cipher.c | 147 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 151 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c index 212840e346d1..c0b1b49cbba5 100644 --- a/drivers/crypto/marvell/cesa.c +++ b/drivers/crypto/marvell/cesa.c @@ -171,6 +171,8 @@ static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa) static struct crypto_alg *armada_370_cipher_algs[] = { &mv_cesa_ecb_des_alg, &mv_cesa_cbc_des_alg, + &mv_cesa_ecb_des3_ede_alg, + &mv_cesa_cbc_des3_ede_alg, &mv_cesa_ecb_aes_alg, &mv_cesa_cbc_aes_alg, }; diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h index 4b07209526bd..1229c3d75b71 100644 --- a/drivers/crypto/marvell/cesa.h +++ b/drivers/crypto/marvell/cesa.h @@ -779,6 +779,8 @@ extern struct ahash_alg mv_ahmac_sha1_alg; extern struct crypto_alg mv_cesa_ecb_des_alg; extern struct crypto_alg mv_cesa_cbc_des_alg; +extern struct crypto_alg mv_cesa_ecb_des3_ede_alg; +extern struct crypto_alg mv_cesa_cbc_des3_ede_alg; extern struct crypto_alg mv_cesa_ecb_aes_alg; extern struct crypto_alg mv_cesa_cbc_aes_alg; diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c index e420ea6c0ec4..0745cf3b9c0e 100644 --- a/drivers/crypto/marvell/cipher.c +++ b/drivers/crypto/marvell/cipher.c @@ -22,6 +22,11 @@ struct mv_cesa_des_ctx { u8 key[DES_KEY_SIZE]; }; +struct mv_cesa_des3_ctx { + struct mv_cesa_ctx base; + u8 key[DES3_EDE_KEY_SIZE]; +}; + struct mv_cesa_aes_ctx { struct mv_cesa_ctx base; struct crypto_aes_ctx aes; @@ -268,6 +273,22 @@ static int mv_cesa_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, return 0; } +static int mv_cesa_des3_ede_setkey(struct crypto_ablkcipher *cipher, + const u8 *key, unsigned int len) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm); + + if (len != DES3_EDE_KEY_SIZE) { + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + memcpy(ctx->key, key, DES3_EDE_KEY_SIZE); + + return 0; +} + static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req, const struct mv_cesa_op_ctx *op_templ) { @@ -514,6 +535,132 @@ struct crypto_alg mv_cesa_cbc_des_alg = { }, }; +static int mv_cesa_des3_op(struct ablkcipher_request *req, + struct mv_cesa_op_ctx *tmpl) +{ + struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + int ret; + + mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES, + CESA_SA_DESC_CFG_CRYPTM_MSK); + + memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE); + + ret = mv_cesa_ablkcipher_req_init(req, tmpl); + if (ret) + return ret; + + ret = mv_cesa_queue_req(&req->base); + if (ret && ret != -EINPROGRESS) + mv_cesa_ablkcipher_cleanup(req); + + return ret; +} + +static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, + CESA_SA_DESC_CFG_CRYPTCM_ECB | + CESA_SA_DESC_CFG_3DES_EDE | + CESA_SA_DESC_CFG_DIR_ENC); + + return mv_cesa_des3_op(req, &tmpl); +} + +static int mv_cesa_ecb_des3_ede_decrypt(struct ablkcipher_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, + CESA_SA_DESC_CFG_CRYPTCM_ECB | + CESA_SA_DESC_CFG_3DES_EDE | + CESA_SA_DESC_CFG_DIR_DEC); + + return mv_cesa_des3_op(req, &tmpl); +} + +struct crypto_alg 
mv_cesa_ecb_des3_ede_alg = { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "mv-ecb-des3-ede", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = mv_cesa_ablkcipher_cra_init, + .cra_u = { + .ablkcipher = { + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setkey = mv_cesa_des3_ede_setkey, + .encrypt = mv_cesa_ecb_des3_ede_encrypt, + .decrypt = mv_cesa_ecb_des3_ede_decrypt, + }, + }, +}; + +static int mv_cesa_cbc_des3_op(struct ablkcipher_request *req, + struct mv_cesa_op_ctx *tmpl) +{ + memcpy(tmpl->ctx.blkcipher.iv, req->info, DES3_EDE_BLOCK_SIZE); + + return mv_cesa_des3_op(req, tmpl); +} + +static int mv_cesa_cbc_des3_ede_encrypt(struct ablkcipher_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, + CESA_SA_DESC_CFG_CRYPTCM_CBC | + CESA_SA_DESC_CFG_3DES_EDE | + CESA_SA_DESC_CFG_DIR_ENC); + + return mv_cesa_cbc_des3_op(req, &tmpl); +} + +static int mv_cesa_cbc_des3_ede_decrypt(struct ablkcipher_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, + CESA_SA_DESC_CFG_CRYPTCM_CBC | + CESA_SA_DESC_CFG_3DES_EDE | + CESA_SA_DESC_CFG_DIR_DEC); + + return mv_cesa_cbc_des3_op(req, &tmpl); +} + +struct crypto_alg mv_cesa_cbc_des3_ede_alg = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "mv-cbc-des3-ede", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = mv_cesa_ablkcipher_cra_init, + .cra_u = { + .ablkcipher = { + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setkey = mv_cesa_des3_ede_setkey, + .encrypt = mv_cesa_cbc_des3_ede_encrypt, + .decrypt = mv_cesa_cbc_des3_ede_decrypt, + }, + }, +}; + static int mv_cesa_aes_op(struct ablkcipher_request *req, struct mv_cesa_op_ctx *tmpl) { -- cgit v1.2.3 From 7aeef693d18d359134f47bf7b6621ec303b570f9 Mon Sep 17 00:00:00 2001 From: Arnaud Ebalard Date: Thu, 18 Jun 2015 15:46:24 +0200 Subject: crypto: marvell/cesa - add MD5 support Add support for MD5 operations. 
Signed-off-by: Arnaud Ebalard Signed-off-by: Boris Brezillon Signed-off-by: Herbert Xu --- drivers/crypto/marvell/cesa.c | 2 + drivers/crypto/marvell/cesa.h | 2 + drivers/crypto/marvell/hash.c | 172 +++++++++++++++++++++++++++++++++++++++++- 3 files changed, 174 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c index c0b1b49cbba5..047c1c0e7eb4 100644 --- a/drivers/crypto/marvell/cesa.c +++ b/drivers/crypto/marvell/cesa.c @@ -178,7 +178,9 @@ static struct crypto_alg *armada_370_cipher_algs[] = { }; static struct ahash_alg *armada_370_ahash_algs[] = { + &mv_md5_alg, &mv_sha1_alg, + &mv_ahmac_md5_alg, &mv_ahmac_sha1_alg, }; diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h index 1229c3d75b71..283e322cc234 100644 --- a/drivers/crypto/marvell/cesa.h +++ b/drivers/crypto/marvell/cesa.h @@ -774,7 +774,9 @@ int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain, /* Algorithm definitions */ +extern struct ahash_alg mv_md5_alg; extern struct ahash_alg mv_sha1_alg; +extern struct ahash_alg mv_ahmac_md5_alg; extern struct ahash_alg mv_ahmac_sha1_alg; extern struct crypto_alg mv_cesa_ecb_des_alg; diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c index 528da260a7cc..36bb756d5fa9 100644 --- a/drivers/crypto/marvell/hash.c +++ b/drivers/crypto/marvell/hash.c @@ -12,6 +12,7 @@ * by the Free Software Foundation. */ +#include #include #include "cesa.h" @@ -346,8 +347,16 @@ static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status) ahashreq->nbytes - creq->cache_ptr); if (creq->last_req) { - for (i = 0; i < digsize / 4; i++) - creq->state[i] = cpu_to_be32(creq->state[i]); + for (i = 0; i < digsize / 4; i++) { + /* + * Hardware provides MD5 digest in a different + * endianness than SHA-1 and SHA-256 ones. 
+ */ + if (digsize == MD5_DIGEST_SIZE) + creq->state[i] = cpu_to_le32(creq->state[i]); + else + creq->state[i] = cpu_to_be32(creq->state[i]); + } memcpy(ahashreq->result, creq->state, digsize); } @@ -788,6 +797,95 @@ static int mv_cesa_ahash_finup(struct ahash_request *req) return ret; } +static int mv_cesa_md5_init(struct ahash_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5); + + mv_cesa_ahash_init(req, &tmpl); + + return 0; +} + +static int mv_cesa_md5_export(struct ahash_request *req, void *out) +{ + struct md5_state *out_state = out; + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + unsigned int digsize = crypto_ahash_digestsize(ahash); + + out_state->byte_count = creq->len; + memcpy(out_state->hash, creq->state, digsize); + memset(out_state->block, 0, sizeof(out_state->block)); + if (creq->cache) + memcpy(out_state->block, creq->cache, creq->cache_ptr); + + return 0; +} + +static int mv_cesa_md5_import(struct ahash_request *req, const void *in) +{ + const struct md5_state *in_state = in; + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + unsigned int digsize = crypto_ahash_digestsize(ahash); + unsigned int cache_ptr; + int ret; + + creq->len = in_state->byte_count; + memcpy(creq->state, in_state->hash, digsize); + creq->cache_ptr = 0; + + cache_ptr = creq->len % sizeof(in_state->block); + if (!cache_ptr) + return 0; + + ret = mv_cesa_ahash_alloc_cache(req); + if (ret) + return ret; + + memcpy(creq->cache, in_state->block, cache_ptr); + creq->cache_ptr = cache_ptr; + + return 0; +} + +static int mv_cesa_md5_digest(struct ahash_request *req) +{ + int ret; + + ret = mv_cesa_md5_init(req); + if (ret) + return ret; + + return mv_cesa_ahash_finup(req); +} + +struct ahash_alg mv_md5_alg = { + .init = mv_cesa_md5_init, + .update = mv_cesa_ahash_update, + .final = mv_cesa_ahash_final, + .finup = mv_cesa_ahash_finup, + .digest = mv_cesa_md5_digest, + .export = mv_cesa_md5_export, + .import = mv_cesa_md5_import, + .halg = { + .digestsize = MD5_DIGEST_SIZE, + .base = { + .cra_name = "md5", + .cra_driver_name = "mv-md5", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx), + .cra_init = mv_cesa_ahash_cra_init, + .cra_module = THIS_MODULE, + } + } +}; + static int mv_cesa_sha1_init(struct ahash_request *req) { struct mv_cesa_op_ctx tmpl; @@ -1043,6 +1141,76 @@ static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm) return 0; } +static int mv_cesa_ahmac_md5_init(struct ahash_request *req) +{ + struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5); + memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv)); + + mv_cesa_ahash_init(req, &tmpl); + + return 0; +} + +static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); + struct md5_state istate, ostate; + int ret, i; + + ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(istate.hash); i++) + ctx->iv[i] = be32_to_cpu(istate.hash[i]); + + for (i = 0; i < ARRAY_SIZE(ostate.hash); i++) + ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]); + + return 0; +} + +static int 
mv_cesa_ahmac_md5_digest(struct ahash_request *req) +{ + int ret; + + ret = mv_cesa_ahmac_md5_init(req); + if (ret) + return ret; + + return mv_cesa_ahash_finup(req); +} + +struct ahash_alg mv_ahmac_md5_alg = { + .init = mv_cesa_ahmac_md5_init, + .update = mv_cesa_ahash_update, + .final = mv_cesa_ahash_final, + .finup = mv_cesa_ahash_finup, + .digest = mv_cesa_ahmac_md5_digest, + .setkey = mv_cesa_ahmac_md5_setkey, + .export = mv_cesa_md5_export, + .import = mv_cesa_md5_import, + .halg = { + .digestsize = MD5_DIGEST_SIZE, + .statesize = sizeof(struct md5_state), + .base = { + .cra_name = "hmac(md5)", + .cra_driver_name = "mv-hmac-md5", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx), + .cra_init = mv_cesa_ahmac_cra_init, + .cra_module = THIS_MODULE, + } + } +}; + static int mv_cesa_ahmac_sha1_init(struct ahash_request *req) { struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm); -- cgit v1.2.3 From f85a762e49bda3cba143ab0db8bde6d2bbe8baf9 Mon Sep 17 00:00:00 2001 From: Arnaud Ebalard Date: Thu, 18 Jun 2015 15:46:25 +0200 Subject: crypto: marvell/cesa - add SHA256 support Add support for SHA256 operations. Signed-off-by: Arnaud Ebalard Signed-off-by: Boris Brezillon Signed-off-by: Herbert Xu --- drivers/crypto/marvell/cesa.c | 2 + drivers/crypto/marvell/cesa.h | 2 + drivers/crypto/marvell/hash.c | 159 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 163 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c index 047c1c0e7eb4..9c43cd7e5d1d 100644 --- a/drivers/crypto/marvell/cesa.c +++ b/drivers/crypto/marvell/cesa.c @@ -180,8 +180,10 @@ static struct crypto_alg *armada_370_cipher_algs[] = { static struct ahash_alg *armada_370_ahash_algs[] = { &mv_md5_alg, &mv_sha1_alg, + &mv_sha256_alg, &mv_ahmac_md5_alg, &mv_ahmac_sha1_alg, + &mv_ahmac_sha256_alg, }; static const struct mv_cesa_caps armada_370_caps = { diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h index 283e322cc234..b60698b30d30 100644 --- a/drivers/crypto/marvell/cesa.h +++ b/drivers/crypto/marvell/cesa.h @@ -776,8 +776,10 @@ int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain, extern struct ahash_alg mv_md5_alg; extern struct ahash_alg mv_sha1_alg; +extern struct ahash_alg mv_sha256_alg; extern struct ahash_alg mv_ahmac_md5_alg; extern struct ahash_alg mv_ahmac_sha1_alg; +extern struct ahash_alg mv_ahmac_sha256_alg; extern struct crypto_alg mv_cesa_ecb_des_alg; extern struct crypto_alg mv_cesa_cbc_des_alg; diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c index 36bb756d5fa9..ae9272eb9c1a 100644 --- a/drivers/crypto/marvell/hash.c +++ b/drivers/crypto/marvell/hash.c @@ -975,6 +975,95 @@ struct ahash_alg mv_sha1_alg = { } }; +static int mv_cesa_sha256_init(struct ahash_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256); + + mv_cesa_ahash_init(req, &tmpl); + + return 0; +} + +static int mv_cesa_sha256_digest(struct ahash_request *req) +{ + int ret; + + ret = mv_cesa_sha256_init(req); + if (ret) + return ret; + + return mv_cesa_ahash_finup(req); +} + +static int mv_cesa_sha256_export(struct ahash_request *req, void *out) +{ + struct sha256_state *out_state = out; + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + unsigned int ds = 
crypto_ahash_digestsize(ahash); + + out_state->count = creq->len; + memcpy(out_state->state, creq->state, ds); + memset(out_state->buf, 0, sizeof(out_state->buf)); + if (creq->cache) + memcpy(out_state->buf, creq->cache, creq->cache_ptr); + + return 0; +} + +static int mv_cesa_sha256_import(struct ahash_request *req, const void *in) +{ + const struct sha256_state *in_state = in; + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + unsigned int digsize = crypto_ahash_digestsize(ahash); + unsigned int cache_ptr; + int ret; + + creq->len = in_state->count; + memcpy(creq->state, in_state->state, digsize); + creq->cache_ptr = 0; + + cache_ptr = creq->len % SHA256_BLOCK_SIZE; + if (!cache_ptr) + return 0; + + ret = mv_cesa_ahash_alloc_cache(req); + if (ret) + return ret; + + memcpy(creq->cache, in_state->buf, cache_ptr); + creq->cache_ptr = cache_ptr; + + return 0; +} + +struct ahash_alg mv_sha256_alg = { + .init = mv_cesa_sha256_init, + .update = mv_cesa_ahash_update, + .final = mv_cesa_ahash_final, + .finup = mv_cesa_ahash_finup, + .digest = mv_cesa_sha256_digest, + .export = mv_cesa_sha256_export, + .import = mv_cesa_sha256_import, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .base = { + .cra_name = "sha256", + .cra_driver_name = "mv-sha256", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx), + .cra_init = mv_cesa_ahash_cra_init, + .cra_module = THIS_MODULE, + } + } +}; + struct mv_cesa_ahash_result { struct completion completion; int error; @@ -1280,3 +1369,73 @@ struct ahash_alg mv_ahmac_sha1_alg = { } } }; + +static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); + struct sha256_state istate, ostate; + int ret, i; + + ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(istate.state); i++) + ctx->iv[i] = be32_to_cpu(istate.state[i]); + + for (i = 0; i < ARRAY_SIZE(ostate.state); i++) + ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]); + + return 0; +} + +static int mv_cesa_ahmac_sha256_init(struct ahash_request *req) +{ + struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256); + memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv)); + + mv_cesa_ahash_init(req, &tmpl); + + return 0; +} + +static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req) +{ + int ret; + + ret = mv_cesa_ahmac_sha256_init(req); + if (ret) + return ret; + + return mv_cesa_ahash_finup(req); +} + +struct ahash_alg mv_ahmac_sha256_alg = { + .init = mv_cesa_ahmac_sha256_init, + .update = mv_cesa_ahash_update, + .final = mv_cesa_ahash_final, + .finup = mv_cesa_ahash_finup, + .digest = mv_cesa_ahmac_sha256_digest, + .setkey = mv_cesa_ahmac_sha256_setkey, + .export = mv_cesa_sha256_export, + .import = mv_cesa_sha256_import, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct sha256_state), + .base = { + .cra_name = "hmac(sha256)", + .cra_driver_name = "mv-hmac-sha256", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx), + .cra_init = mv_cesa_ahmac_cra_init, + .cra_module = THIS_MODULE, + } + 
} +}; -- cgit v1.2.3 From 898c9d5ea2182287620f8995b32e457d7b3b54ca Mon Sep 17 00:00:00 2001 From: Boris BREZILLON Date: Thu, 18 Jun 2015 15:46:26 +0200 Subject: crypto: marvell/cesa - add support for all armada SoCs Add CESA IP description for all the missing armada SoCs (XP, 375 and 38x). Signed-off-by: Boris Brezillon Signed-off-by: Herbert Xu --- drivers/crypto/marvell/cesa.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c index 9c43cd7e5d1d..af590bf853d2 100644 --- a/drivers/crypto/marvell/cesa.c +++ b/drivers/crypto/marvell/cesa.c @@ -195,8 +195,20 @@ static const struct mv_cesa_caps armada_370_caps = { .has_tdma = true, }; +static const struct mv_cesa_caps armada_xp_caps = { + .nengines = 2, + .cipher_algs = armada_370_cipher_algs, + .ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs), + .ahash_algs = armada_370_ahash_algs, + .nahash_algs = ARRAY_SIZE(armada_370_ahash_algs), + .has_tdma = true, +}; + static const struct of_device_id mv_cesa_of_match_table[] = { { .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps }, + { .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps }, + { .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps }, + { .compatible = "marvell,armada-38x-crypto", .data = &armada_xp_caps }, {} }; MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table); -- cgit v1.2.3 From 64c55d499bdadf242e83ad6e5670c72da6c1a3c9 Mon Sep 17 00:00:00 2001 From: Boris BREZILLON Date: Thu, 18 Jun 2015 15:46:27 +0200 Subject: crypto: marvell/cesa - add allhwsupport module parameter The old and new marvell CESA drivers both support Orion and Kirkwood SoCs. Add a module parameter to choose whether these SoCs should be attached to the new or the old driver. The default policy is to keep attaching those IPs to the old driver if it is enabled, until we decide the new CESA driver is stable/secure enough. Signed-off-by: Boris Brezillon Signed-off-by: Herbert Xu --- drivers/crypto/marvell/cesa.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c index af590bf853d2..a05b5cb3aea7 100644 --- a/drivers/crypto/marvell/cesa.c +++ b/drivers/crypto/marvell/cesa.c @@ -31,6 +31,10 @@ #include "cesa.h" +static int allhwsupport = !IS_ENABLED(CONFIG_CRYPTO_DEV_MV_CESA); +module_param_named(allhwsupport, allhwsupport, int, 0444); +MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even it if overlaps with the mv_cesa driver)"); + struct mv_cesa_dev *cesa_dev; static void mv_cesa_dequeue_req_unlocked(struct mv_cesa_engine *engine) -- cgit v1.2.3 From 0bf6948995f9f7c4b0ce062db793c205e921ee48 Mon Sep 17 00:00:00 2001 From: Boris BREZILLON Date: Thu, 18 Jun 2015 15:46:28 +0200 Subject: crypto: marvell/cesa - add support for Orion SoCs Add the Orion SoC description, and select this implementation by default to support non-DT probing: Orion is the only platform where non-DT boards are declaring the CESA block. Control the allhwsupport module parameter to avoid probing the CESA IP when the old CESA driver is enabled (unless it is explicitly requested to do so). 
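For reference, the probe-time selection this change aims for can be condensed into the sketch below; symbol names mirror the driver (orion_caps, mv_cesa_of_match_table, allhwsupport), but the helper itself and its simplified error handling are illustrative only, the actual diff that follows keeps everything inline in mv_cesa_probe():

static int mv_cesa_pick_caps_sketch(struct platform_device *pdev,
				    const struct mv_cesa_caps **out)
{
	/* Non-DT (board file) probing falls back to the Orion description. */
	const struct mv_cesa_caps *caps = &orion_caps;
	const struct of_device_id *match;

	if (pdev->dev.of_node) {
		match = of_match_node(mv_cesa_of_match_table,
				      pdev->dev.of_node);
		if (!match || !match->data)
			return -ENOTSUPP;
		caps = match->data;
	}

	/* Leave legacy SoCs to the old mv_cesa driver unless allhwsupport is set. */
	if (caps == &orion_caps && !allhwsupport)
		return -ENOTSUPP;

	*out = caps;
	return 0;
}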
Signed-off-by: Boris Brezillon Signed-off-by: Herbert Xu --- drivers/crypto/marvell/cesa.c | 42 +++++++++++++++++++++++++++++++++++------- 1 file changed, 35 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c index a05b5cb3aea7..8e5ea7218bb2 100644 --- a/drivers/crypto/marvell/cesa.c +++ b/drivers/crypto/marvell/cesa.c @@ -172,6 +172,22 @@ static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa) crypto_unregister_alg(cesa->caps->cipher_algs[i]); } +static struct crypto_alg *orion_cipher_algs[] = { + &mv_cesa_ecb_des_alg, + &mv_cesa_cbc_des_alg, + &mv_cesa_ecb_des3_ede_alg, + &mv_cesa_cbc_des3_ede_alg, + &mv_cesa_ecb_aes_alg, + &mv_cesa_cbc_aes_alg, +}; + +static struct ahash_alg *orion_ahash_algs[] = { + &mv_md5_alg, + &mv_sha1_alg, + &mv_ahmac_md5_alg, + &mv_ahmac_sha1_alg, +}; + static struct crypto_alg *armada_370_cipher_algs[] = { &mv_cesa_ecb_des_alg, &mv_cesa_cbc_des_alg, @@ -190,6 +206,15 @@ static struct ahash_alg *armada_370_ahash_algs[] = { &mv_ahmac_sha256_alg, }; +static const struct mv_cesa_caps orion_caps = { + .nengines = 1, + .cipher_algs = orion_cipher_algs, + .ncipher_algs = ARRAY_SIZE(orion_cipher_algs), + .ahash_algs = orion_ahash_algs, + .nahash_algs = ARRAY_SIZE(orion_ahash_algs), + .has_tdma = false, +}; + static const struct mv_cesa_caps armada_370_caps = { .nengines = 1, .cipher_algs = armada_370_cipher_algs, @@ -209,6 +234,7 @@ static const struct mv_cesa_caps armada_xp_caps = { }; static const struct of_device_id mv_cesa_of_match_table[] = { + { .compatible = "marvell,orion-crypto", .data = &orion_caps }, { .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps }, { .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps }, { .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps }, @@ -334,7 +360,7 @@ static void mv_cesa_put_sram(struct platform_device *pdev, int idx) static int mv_cesa_probe(struct platform_device *pdev) { - const struct mv_cesa_caps *caps = NULL; + const struct mv_cesa_caps *caps = &orion_caps; const struct mbus_dram_target_info *dram; const struct of_device_id *match; struct device *dev = &pdev->dev; @@ -349,14 +375,16 @@ static int mv_cesa_probe(struct platform_device *pdev) return -EEXIST; } - if (!dev->of_node) - return -ENOTSUPP; + if (dev->of_node) { + match = of_match_node(mv_cesa_of_match_table, dev->of_node); + if (!match || !match->data) + return -ENOTSUPP; - match = of_match_node(mv_cesa_of_match_table, dev->of_node); - if (!match || !match->data) - return -ENOTSUPP; + caps = match->data; + } - caps = match->data; + if (caps == &orion_caps && !allhwsupport) + return -ENOTSUPP; cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL); if (!cesa) -- cgit v1.2.3 From 7240425579b881a3e26ba62a1bca29b45d4bfadc Mon Sep 17 00:00:00 2001 From: Arnaud Ebalard Date: Thu, 18 Jun 2015 15:46:29 +0200 Subject: crypto: marvell/cesa - add support for Kirkwood and Dove SoCs Add the Kirkwood and Dove SoC descriptions, and control the allhwsupport module parameter to avoid probing the CESA IP when the old CESA driver is enabled (unless it is explicitly requested to do so). 
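With this addition the allhwsupport gate has to cover two capability sets; a purely illustrative helper (not part of the patch, which keeps the test inline as the diff below shows) would read:

static bool mv_cesa_is_legacy_soc(const struct mv_cesa_caps *caps)
{
	/* SoCs that are also handled by the old mv_cesa driver. */
	return caps == &orion_caps || caps == &kirkwood_caps;
}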
Signed-off-by: Arnaud Ebalard Signed-off-by: Boris Brezillon Signed-off-by: Herbert Xu --- drivers/crypto/marvell/cesa.c | 13 +++++++++---- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c index 8e5ea7218bb2..a432633bced4 100644 --- a/drivers/crypto/marvell/cesa.c +++ b/drivers/crypto/marvell/cesa.c @@ -215,6 +215,15 @@ static const struct mv_cesa_caps orion_caps = { .has_tdma = false, }; +static const struct mv_cesa_caps kirkwood_caps = { + .nengines = 1, + .cipher_algs = orion_cipher_algs, + .ncipher_algs = ARRAY_SIZE(orion_cipher_algs), + .ahash_algs = orion_ahash_algs, + .nahash_algs = ARRAY_SIZE(orion_ahash_algs), + .has_tdma = true, +}; + static const struct mv_cesa_caps armada_370_caps = { .nengines = 1, .cipher_algs = armada_370_cipher_algs, @@ -235,6 +244,8 @@ static const struct mv_cesa_caps armada_xp_caps = { static const struct of_device_id mv_cesa_of_match_table[] = { { .compatible = "marvell,orion-crypto", .data = &orion_caps }, + { .compatible = "marvell,kirkwood-crypto", .data = &kirkwood_caps }, + { .compatible = "marvell,dove-crypto", .data = &kirkwood_caps }, { .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps }, { .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps }, { .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps }, @@ -383,7 +394,7 @@ static int mv_cesa_probe(struct platform_device *pdev) caps = match->data; } - if (caps == &orion_caps && !allhwsupport) + if ((caps == &orion_caps || caps == &kirkwood_caps) && !allhwsupport) return -ENOTSUPP; cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL); -- cgit v1.2.3 From 8000112cedb8743bad67997a96dc94877a36ce0f Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 19 Jun 2015 12:07:54 +0800 Subject: crypto: nx - Check for bogus firmware properties MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The nx driver reads two crucial parameters from the firmware for each crypto algorithm, the maximum SG list length and byte limit. Unfortunately those two parameters may be bogus, or worse they may be absent altogether. When this happens the algorithms will still register successfully but will fail when used or tested. This patch adds checks to report any firmware entries which are found to be bogus, and avoid registering algorithms which have bogus parameters. A warning is also printed when an algorithm is not registered because of this as there may have been no firmware entries for it at all. 
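In essence, the validity test applied to each firmware-provided (sglen, databytelen) pair boils down to the predicate sketched here; the real helpers in the diff below are nx_of_update_msc(), nx_check_prop() and nx_check_props(), and the function name here is made up for illustration:

static bool nx_msc_triplet_usable(u32 sglen, u32 databytelen)
{
	/* A zero SG limit or a byte limit below one page cannot work. */
	return sglen != 0 && databytelen >= NX_PAGE_SIZE;
}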
Reported-by: Ondrej Moriš Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx.c | 150 ++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 118 insertions(+), 32 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index 847350534cc1..f6198f29a4a8 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -398,6 +399,13 @@ static void nx_of_update_msc(struct device *dev, goto next_loop; } + if (!trip->sglen || trip->databytelen < NX_PAGE_SIZE) { + dev_warn(dev, "bogus sglen/databytelen: " + "%u/%u (ignored)\n", trip->sglen, + trip->databytelen); + goto next_loop; + } + switch (trip->keybitlen) { case 128: case 160: @@ -490,6 +498,72 @@ static void nx_of_init(struct device *dev, struct nx_of *props) nx_of_update_msc(dev, p, props); } +static bool nx_check_prop(struct device *dev, u32 fc, u32 mode, int slot) +{ + struct alg_props *props = &nx_driver.of.ap[fc][mode][slot]; + + if (!props->sglen || props->databytelen < NX_PAGE_SIZE) { + if (dev) + dev_warn(dev, "bogus sglen/databytelen for %u/%u/%u: " + "%u/%u (ignored)\n", fc, mode, slot, + props->sglen, props->databytelen); + return false; + } + + return true; +} + +static bool nx_check_props(struct device *dev, u32 fc, u32 mode) +{ + int i; + + for (i = 0; i < 3; i++) + if (!nx_check_prop(dev, fc, mode, i)) + return false; + + return true; +} + +static int nx_register_alg(struct crypto_alg *alg, u32 fc, u32 mode) +{ + return nx_check_props(&nx_driver.viodev->dev, fc, mode) ? + crypto_register_alg(alg) : 0; +} + +static int nx_register_aead(struct aead_alg *alg, u32 fc, u32 mode) +{ + return nx_check_props(&nx_driver.viodev->dev, fc, mode) ? + crypto_register_aead(alg) : 0; +} + +static int nx_register_shash(struct shash_alg *alg, u32 fc, u32 mode, int slot) +{ + return (slot >= 0 ? nx_check_prop(&nx_driver.viodev->dev, + fc, mode, slot) : + nx_check_props(&nx_driver.viodev->dev, fc, mode)) ? + crypto_register_shash(alg) : 0; +} + +static void nx_unregister_alg(struct crypto_alg *alg, u32 fc, u32 mode) +{ + if (nx_check_props(NULL, fc, mode)) + crypto_unregister_alg(alg); +} + +static void nx_unregister_aead(struct aead_alg *alg, u32 fc, u32 mode) +{ + if (nx_check_props(NULL, fc, mode)) + crypto_unregister_aead(alg); +} + +static void nx_unregister_shash(struct shash_alg *alg, u32 fc, u32 mode, + int slot) +{ + if (slot >= 0 ? 
nx_check_prop(NULL, fc, mode, slot) : + nx_check_props(NULL, fc, mode)) + crypto_unregister_shash(alg); +} + /** * nx_register_algs - register algorithms with the crypto API * @@ -514,72 +588,77 @@ static int nx_register_algs(void) nx_driver.of.status = NX_OKAY; - rc = crypto_register_alg(&nx_ecb_aes_alg); + rc = nx_register_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB); if (rc) goto out; - rc = crypto_register_alg(&nx_cbc_aes_alg); + rc = nx_register_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC); if (rc) goto out_unreg_ecb; - rc = crypto_register_alg(&nx_ctr_aes_alg); + rc = nx_register_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); if (rc) goto out_unreg_cbc; - rc = crypto_register_alg(&nx_ctr3686_aes_alg); + rc = nx_register_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); if (rc) goto out_unreg_ctr; - rc = crypto_register_aead(&nx_gcm_aes_alg); + rc = nx_register_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); if (rc) goto out_unreg_ctr3686; - rc = crypto_register_aead(&nx_gcm4106_aes_alg); + rc = nx_register_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); if (rc) goto out_unreg_gcm; - rc = crypto_register_alg(&nx_ccm_aes_alg); + rc = nx_register_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); if (rc) goto out_unreg_gcm4106; - rc = crypto_register_alg(&nx_ccm4309_aes_alg); + rc = nx_register_alg(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); if (rc) goto out_unreg_ccm; - rc = crypto_register_shash(&nx_shash_sha256_alg); + rc = nx_register_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA, + NX_PROPS_SHA256); if (rc) goto out_unreg_ccm4309; - rc = crypto_register_shash(&nx_shash_sha512_alg); + rc = nx_register_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA, + NX_PROPS_SHA512); if (rc) goto out_unreg_s256; - rc = crypto_register_shash(&nx_shash_aes_xcbc_alg); + rc = nx_register_shash(&nx_shash_aes_xcbc_alg, + NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1); if (rc) goto out_unreg_s512; goto out; out_unreg_s512: - crypto_unregister_shash(&nx_shash_sha512_alg); + nx_unregister_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA, + NX_PROPS_SHA512); out_unreg_s256: - crypto_unregister_shash(&nx_shash_sha256_alg); + nx_unregister_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA, + NX_PROPS_SHA256); out_unreg_ccm4309: - crypto_unregister_alg(&nx_ccm4309_aes_alg); + nx_unregister_alg(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); out_unreg_ccm: - crypto_unregister_alg(&nx_ccm_aes_alg); + nx_unregister_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); out_unreg_gcm4106: - crypto_unregister_aead(&nx_gcm4106_aes_alg); + nx_unregister_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); out_unreg_gcm: - crypto_unregister_aead(&nx_gcm_aes_alg); + nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); out_unreg_ctr3686: - crypto_unregister_alg(&nx_ctr3686_aes_alg); + nx_unregister_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); out_unreg_ctr: - crypto_unregister_alg(&nx_ctr_aes_alg); + nx_unregister_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); out_unreg_cbc: - crypto_unregister_alg(&nx_cbc_aes_alg); + nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC); out_unreg_ecb: - crypto_unregister_alg(&nx_ecb_aes_alg); + nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB); out: return rc; } @@ -725,17 +804,24 @@ static int nx_remove(struct vio_dev *viodev) if (nx_driver.of.status == NX_OKAY) { NX_DEBUGFS_FINI(&nx_driver); - crypto_unregister_alg(&nx_ccm_aes_alg); - crypto_unregister_alg(&nx_ccm4309_aes_alg); - 
crypto_unregister_aead(&nx_gcm_aes_alg); - crypto_unregister_aead(&nx_gcm4106_aes_alg); - crypto_unregister_alg(&nx_ctr_aes_alg); - crypto_unregister_alg(&nx_ctr3686_aes_alg); - crypto_unregister_alg(&nx_cbc_aes_alg); - crypto_unregister_alg(&nx_ecb_aes_alg); - crypto_unregister_shash(&nx_shash_sha256_alg); - crypto_unregister_shash(&nx_shash_sha512_alg); - crypto_unregister_shash(&nx_shash_aes_xcbc_alg); + nx_unregister_shash(&nx_shash_aes_xcbc_alg, + NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1); + nx_unregister_shash(&nx_shash_sha512_alg, + NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA256); + nx_unregister_shash(&nx_shash_sha256_alg, + NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA512); + nx_unregister_alg(&nx_ccm4309_aes_alg, + NX_FC_AES, NX_MODE_AES_CCM); + nx_unregister_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); + nx_unregister_aead(&nx_gcm4106_aes_alg, + NX_FC_AES, NX_MODE_AES_GCM); + nx_unregister_aead(&nx_gcm_aes_alg, + NX_FC_AES, NX_MODE_AES_GCM); + nx_unregister_alg(&nx_ctr3686_aes_alg, + NX_FC_AES, NX_MODE_AES_CTR); + nx_unregister_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); + nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC); + nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB); } return 0; -- cgit v1.2.3 From 1bd2cd6bc6ad8b582046818189766f109687b3fe Mon Sep 17 00:00:00 2001 From: Michael van der Westhuizen Date: Fri, 19 Jun 2015 15:55:51 +0200 Subject: crypto: picoxcell - Update to the current clk API The picoXcell hardware crypto accelerator driver was using an older version of the clk framework, and not (un)preparing the clock before enabling/disabling it. This change uses the clk_prepare_enable function to interact with the current clk framework correctly. Signed-off-by: Michael van der Westhuizen Signed-off-by: Herbert Xu --- drivers/crypto/picoxcell_crypto.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index c2fd860745ad..4f56f3681abd 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -1755,15 +1755,15 @@ static int spacc_probe(struct platform_device *pdev) return PTR_ERR(engine->clk); } - if (clk_enable(engine->clk)) { - dev_info(&pdev->dev, "unable to enable clk\n"); + if (clk_prepare_enable(engine->clk)) { + dev_info(&pdev->dev, "unable to prepare/enable clk\n"); clk_put(engine->clk); return -EIO; } err = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh); if (err) { - clk_disable(engine->clk); + clk_disable_unprepare(engine->clk); clk_put(engine->clk); return err; } @@ -1831,7 +1831,7 @@ static int spacc_remove(struct platform_device *pdev) crypto_unregister_alg(&alg->alg); } - clk_disable(engine->clk); + clk_disable_unprepare(engine->clk); clk_put(engine->clk); return 0; -- cgit v1.2.3 From 7eac7144dd1ef11c05a5f7605de6afa3770982e4 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Sat, 20 Jun 2015 15:30:22 -0300 Subject: crypto: sahara - propagate the error on clk_prepare_enable() failure clk_prepare_enable() may fail, so we should check its return value and propagate it on error. 
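The sahara fix follows the usual check-and-unwind sequence for clock setup; a self-contained sketch of that pattern, with illustrative names rather than the driver's own, looks like this:

#include <linux/clk.h>

static int example_enable_clks(struct clk *ipg, struct clk *ahb)
{
	int err;

	/* clk_prepare_enable() returns 0 on success or a negative errno. */
	err = clk_prepare_enable(ipg);
	if (err)
		return err;

	err = clk_prepare_enable(ahb);
	if (err)
		goto disable_ipg;

	return 0;

disable_ipg:
	/* Unwind in reverse order so no clock is left enabled on failure. */
	clk_disable_unprepare(ipg);
	return err;
}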
Signed-off-by: Fabio Estevam Signed-off-by: Herbert Xu --- drivers/crypto/sahara.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index 6be377f6b9e7..397a500b3d8a 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -1578,8 +1578,12 @@ static int sahara_probe(struct platform_device *pdev) init_completion(&dev->dma_completion); - clk_prepare_enable(dev->clk_ipg); - clk_prepare_enable(dev->clk_ahb); + err = clk_prepare_enable(dev->clk_ipg); + if (err) + goto err_link; + err = clk_prepare_enable(dev->clk_ahb); + if (err) + goto clk_ipg_disable; version = sahara_read(dev, SAHARA_REG_VERSION); if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) { @@ -1619,10 +1623,11 @@ err_algs: dma_free_coherent(&pdev->dev, SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link), dev->hw_link[0], dev->hw_phys_link[0]); - clk_disable_unprepare(dev->clk_ipg); - clk_disable_unprepare(dev->clk_ahb); kthread_stop(dev->kthread); dev_ptr = NULL; + clk_disable_unprepare(dev->clk_ahb); +clk_ipg_disable: + clk_disable_unprepare(dev->clk_ipg); err_link: dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, -- cgit v1.2.3 From fe55dfdcdfabf160ab0c14617725c57c7a1facfc Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Mon, 22 Jun 2015 09:22:14 +0200 Subject: crypto: marvell/cesa - remove COMPILE_TEST dependency The CESA driver calls phys_to_virt() which is not available on all architectures. Remove the dependency on COMPILE_TEST to prevent building this driver on non-ARM architectures. Signed-off-by: Boris Brezillon Reported-by: Paul Gortmaker Suggested-by: Herbert Xu Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index fd69aa04b475..4044125fb5d5 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -175,7 +175,7 @@ config CRYPTO_DEV_MV_CESA config CRYPTO_DEV_MARVELL_CESA tristate "New Marvell's Cryptographic Engine driver" - depends on (PLAT_ORION || ARCH_MVEBU || COMPILE_TEST) && HAS_DMA && HAS_IOMEM + depends on PLAT_ORION || ARCH_MVEBU select CRYPTO_AES select CRYPTO_DES select CRYPTO_BLKCIPHER -- cgit v1.2.3
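For context, the portability problem behind the Kconfig change is with calls of the kind sketched below, which assume a direct physical-to-virtual mapping; the helper name is made up and does not reproduce the driver's actual call site:

#include <linux/io.h>
#include <linux/types.h>

static void *tdma_desc_virt(phys_addr_t pa)
{
	/*
	 * Only valid where phys_to_virt() is provided and the address lies
	 * in the kernel linear mapping, which is why the config entry is
	 * now restricted to PLAT_ORION/ARCH_MVEBU instead of COMPILE_TEST.
	 */
	return phys_to_virt(pa);
}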