author | Simon Sandström <simon@nikanor.nu> | 2017-07-18 23:03:54 +0300 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2017-07-28 07:53:21 +0300 |
commit | 4ff66c46b04ccd207c1f7ffd8393743261fe8fd6 (patch) | |
tree | 06e59debbaef82f114e6d045df0d988684fbc23c /drivers | |
parent | 3151c1df14e685dd2c27de7bec02b29029cfc837 (diff) | |
download | linux-4ff66c46b04ccd207c1f7ffd8393743261fe8fd6.tar.xz |
staging: ccree: Fix alignment issues in ssi_buffer_mgr.c
Fixes checkpatch.pl alignment warnings.
Signed-off-by: Simon Sandström <simon@nikanor.nu>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
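For context, checkpatch.pl emits "CHECK: Alignment should match open parenthesis" when the continuation line of a wrapped function call is not lined up under the call's opening parenthesis. The sketch below illustrates the style being enforced; the helper name and arguments are hypothetical and not taken from the ccree driver:

```c
/* Illustration only: hypothetical helper, not part of ssi_buffer_mgr.c. */
static int do_map(int dev, void *buf, unsigned int len, int flags)
{
	return 0;
}

static int caller(int dev, void *buf, unsigned int len, int flags)
{
	int rc;

	/*
	 * checkpatch.pl flags this wrapping, because "len, flags" does not
	 * line up with the '(' of the call:
	 *
	 *	rc = do_map(dev, buf,
	 *		len, flags);
	 *
	 * The preferred form aligns the continuation arguments under the
	 * opening parenthesis, which is the rewrite applied throughout the
	 * diff below.
	 */
	rc = do_map(dev, buf,
		    len, flags);

	return rc;
}
```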
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/staging/ccree/ssi_buffer_mgr.c | 40 |
1 file changed, 24 insertions, 16 deletions
diff --git a/drivers/staging/ccree/ssi_buffer_mgr.c b/drivers/staging/ccree/ssi_buffer_mgr.c
index 6579a54f9dc4..63936091d524 100644
--- a/drivers/staging/ccree/ssi_buffer_mgr.c
+++ b/drivers/staging/ccree/ssi_buffer_mgr.c
@@ -371,7 +371,7 @@ static int ssi_buffer_mgr_map_scatterlist(
 		*mapped_nents = 1;
 	} else {  /*sg_is_last*/
 		*nents = ssi_buffer_mgr_get_sgl_nents(sg, nbytes, lbytes,
-			&is_chained);
+						      &is_chained);
 		if (*nents > max_sg_nents) {
 			*nents = 0;
 			SSI_LOG_ERR("Too many fragments. current %d max %d\n",
@@ -393,9 +393,9 @@ static int ssi_buffer_mgr_map_scatterlist(
 		 * must have the same nents before and after map
 		 */
 		*mapped_nents = ssi_buffer_mgr_dma_map_sg(dev,
-			sg,
-			*nents,
-			direction);
+							  sg,
+							  *nents,
+							  direction);
 		if (unlikely(*mapped_nents != *nents)) {
 			*nents = *mapped_nents;
 			SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
@@ -783,8 +783,8 @@ static inline int ssi_buffer_mgr_aead_chain_iv(
 		goto chain_iv_exit;
 	}
 
-	areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
-		hw_iv_size, DMA_BIDIRECTIONAL);
+	areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv, hw_iv_size,
+						       DMA_BIDIRECTIONAL);
 	if (unlikely(dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr))) {
 		SSI_LOG_ERR("Mapping iv %u B at va=%pK for DMA failed\n",
 			    hw_iv_size, req->iv);
@@ -1323,8 +1323,9 @@ int ssi_buffer_mgr_map_aead_request(
 			req->cryptlen :
 			(req->cryptlen - authsize);
 
-	areq_ctx->mac_buf_dma_addr = dma_map_single(dev,
-		areq_ctx->mac_buf, MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
+	areq_ctx->mac_buf_dma_addr = dma_map_single(dev, areq_ctx->mac_buf,
+						    MAX_MAC_SIZE,
+						    DMA_BIDIRECTIONAL);
 	if (unlikely(dma_mapping_error(dev, areq_ctx->mac_buf_dma_addr))) {
 		SSI_LOG_ERR("Mapping mac_buf %u B at va=%pK for DMA failed\n",
 			    MAX_MAC_SIZE, areq_ctx->mac_buf);
@@ -1334,8 +1335,9 @@ int ssi_buffer_mgr_map_aead_request(
 
 	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 		areq_ctx->ccm_iv0_dma_addr = dma_map_single(dev,
-			(areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET),
-			AES_BLOCK_SIZE, DMA_TO_DEVICE);
+							    (areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET),
+							    AES_BLOCK_SIZE,
+							    DMA_TO_DEVICE);
 
 		if (unlikely(dma_mapping_error(dev, areq_ctx->ccm_iv0_dma_addr))) {
 			SSI_LOG_ERR("Mapping mac_buf %u B at va=%pK "
@@ -1356,7 +1358,9 @@ int ssi_buffer_mgr_map_aead_request(
 #if SSI_CC_HAS_AES_GCM
 	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
 		areq_ctx->hkey_dma_addr = dma_map_single(dev,
-			areq_ctx->hkey, AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
+							 areq_ctx->hkey,
+							 AES_BLOCK_SIZE,
+							 DMA_BIDIRECTIONAL);
 		if (unlikely(dma_mapping_error(dev, areq_ctx->hkey_dma_addr))) {
 			SSI_LOG_ERR("Mapping hkey %u B at va=%pK for DMA failed\n",
 				    AES_BLOCK_SIZE, areq_ctx->hkey);
@@ -1365,7 +1369,9 @@ int ssi_buffer_mgr_map_aead_request(
 		}
 
 		areq_ctx->gcm_block_len_dma_addr = dma_map_single(dev,
-			&areq_ctx->gcm_len_block, AES_BLOCK_SIZE, DMA_TO_DEVICE);
+								  &areq_ctx->gcm_len_block,
+								  AES_BLOCK_SIZE,
+								  DMA_TO_DEVICE);
 		if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_block_len_dma_addr))) {
 			SSI_LOG_ERR("Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
 				    AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
@@ -1374,8 +1380,9 @@ int ssi_buffer_mgr_map_aead_request(
 		}
 
 		areq_ctx->gcm_iv_inc1_dma_addr = dma_map_single(dev,
-			areq_ctx->gcm_iv_inc1,
-			AES_BLOCK_SIZE, DMA_TO_DEVICE);
+								areq_ctx->gcm_iv_inc1,
+								AES_BLOCK_SIZE,
+								DMA_TO_DEVICE);
 
 		if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc1_dma_addr))) {
 			SSI_LOG_ERR("Mapping gcm_iv_inc1 %u B at va=%pK "
@@ -1387,8 +1394,9 @@ int ssi_buffer_mgr_map_aead_request(
 		}
 
 		areq_ctx->gcm_iv_inc2_dma_addr = dma_map_single(dev,
-			areq_ctx->gcm_iv_inc2,
-			AES_BLOCK_SIZE, DMA_TO_DEVICE);
+								areq_ctx->gcm_iv_inc2,
+								AES_BLOCK_SIZE,
+								DMA_TO_DEVICE);
 
 		if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc2_dma_addr))) {
 			SSI_LOG_ERR("Mapping gcm_iv_inc2 %u B at va=%pK "