// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Shared glue code for 128bit block ciphers
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/module.h>
#include <crypto/b128ops.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <asm/crypto/glue_helper.h>
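
/*
 * glue_ecb_req_128bit() - ECB walk for a 128-bit block cipher.
 *
 * For each chunk returned by skcipher_walk_virt(), the widest batch function
 * in gctx->funcs[] that still fits the remaining bytes is used; the entries
 * are expected in most-parallel-first order. The FPU is enabled lazily via
 * glue_fpu_begin() and released once the whole request has been processed.
 */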
int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
			struct skcipher_request *req)
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes)) {
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;
		unsigned int func_bytes;
		unsigned int i;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     &walk, fpu_enabled, nbytes);
		for (i = 0; i < gctx->num_funcs; i++) {
			func_bytes = bsize * gctx->funcs[i].num_blocks;

			if (nbytes < func_bytes)
				continue;

			/* Process multi-block batch */
			do {
				gctx->funcs[i].fn_u.ecb(ctx, dst, src);
				src += func_bytes;
				dst += func_bytes;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				break;
		}
		err = skcipher_walk_done(&walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}
EXPORT_SYMBOL_GPL(glue_ecb_req_128bit);
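
/*
 * glue_cbc_encrypt_req_128bit() - CBC encryption walk.
 *
 * CBC encryption is inherently serial: each block is XORed with the previous
 * ciphertext block before being encrypted, so only the single-block function
 * @fn is used and no FPU batching is attempted. The running chaining value
 * is written back to walk.iv between chunks.
 */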
int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn,
				struct skcipher_request *req)
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes)) {
		const u128 *src = (u128 *)walk.src.virt.addr;
		u128 *dst = (u128 *)walk.dst.virt.addr;
		u128 *iv = (u128 *)walk.iv;

		do {
			u128_xor(dst, src, iv);
			fn(ctx, (u8 *)dst, (u8 *)dst);
			iv = dst;
			src++;
			dst++;
			nbytes -= bsize;
		} while (nbytes >= bsize);

		*(u128 *)walk.iv = *iv;
		err = skcipher_walk_done(&walk, nbytes);
	}
	return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_encrypt_req_128bit);
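
/*
 * glue_cbc_decrypt_req_128bit() - CBC decryption walk.
 *
 * Unlike encryption, CBC decryption parallelizes, so the multi-block batch
 * functions from gctx->funcs[] are used under the FPU. Each chunk is
 * processed from its last block towards the first, which keeps the
 * ciphertext blocks still needed as IVs intact when the request is
 * decrypted in place.
 */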
int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
				struct skcipher_request *req)
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes)) {
		const u128 *src = (u128 *)walk.src.virt.addr;
		u128 *dst = (u128 *)walk.dst.virt.addr;
		unsigned int func_bytes, num_blocks;
		unsigned int i;
		u128 last_iv;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     &walk, fpu_enabled, nbytes);
		/*
		 * Start at the last block so the chunk can be decrypted in
		 * place: earlier ciphertext blocks are still needed as IVs.
		 */
		src += nbytes / bsize - 1;
		dst += nbytes / bsize - 1;

		last_iv = *src;

		for (i = 0; i < gctx->num_funcs; i++) {
			num_blocks = gctx->funcs[i].num_blocks;
			func_bytes = bsize * num_blocks;

			if (nbytes < func_bytes)
				continue;

			/* Process multi-block batch */
			do {
				src -= num_blocks - 1;
				dst -= num_blocks - 1;

				gctx->funcs[i].fn_u.cbc(ctx, (u8 *)dst,
							(const u8 *)src);

				nbytes -= func_bytes;
				if (nbytes < bsize)
					goto done;

				u128_xor(dst, dst, --src);
				dst--;
			} while (nbytes >= func_bytes);
		}
done:
		u128_xor(dst, dst, (u128 *)walk.iv);
		*(u128 *)walk.iv = last_iv;

		err = skcipher_walk_done(&walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_decrypt_req_128bit);
MODULE_LICENSE("GPL");