author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-03-17 21:22:54 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-03-17 21:22:54 +0300
commit | 70477371dc350746d10431d74f0f213a8d59924c (patch)
tree | 6271978b6e4ee4b1e6f22775ad7fc0930c09d3ee /drivers
parent | 09fd671ccb2475436bd5f597f751ca4a7d177aea (diff)
parent | 34074205bb9f04b416efb3cbedcd90f418c86200 (diff)
download | linux-70477371dc350746d10431d74f0f213a8d59924c.tar.xz
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
"Here is the crypto update for 4.6:
API:
- Convert remaining crypto_hash users to shash or ahash, also convert
blkcipher/ablkcipher users to skcipher.
- Remove crypto_hash interface.
- Remove crypto_pcomp interface.
- Add crypto engine for async cipher drivers.
- Add akcipher documentation.
- Add skcipher documentation.
Algorithms:
- Rename crypto/crc32 to avoid name clash with lib/crc32.
- Fix bug in keywrap where we zero the wrong pointer.
Drivers:
- Support T5/M5, T7/M7 SPARC CPUs in n2 hwrng driver.
- Add PIC32 hwrng driver.
- Support BCM6368 in bcm63xx hwrng driver.
- Pack structs for 32-bit compat users in qat.
- Use crypto engine in omap-aes.
- Add support for sama5d2x SoCs in atmel-sha.
- Make atmel-sha available again.
- Make sahara hashing available again.
- Make ccp hashing available again.
- Make sha1-mb available again.
- Add support for multiple devices in ccp.
- Improve DMA performance in caam.
- Add hashing support to rockchip"
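The API conversions listed above all follow the same template: allocate the new tfm type, attach an on-stack request or descriptor, and feed it the same data the legacy interface took. As a minimal sketch of the skcipher calling convention that cryptoloop adopts below — the "cbc(aes)" name, the helper, and its parameters are illustrative, not taken from the patch:

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: encrypt one contiguous buffer in place. */
static int example_skcipher_encrypt(u8 *buf, unsigned int len,
				    const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct scatterlist sg;
	int err;

	/* CRYPTO_ALG_ASYNC in the mask requests a synchronous implementation. */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free;

	{
		/* Safe on the stack only because the tfm is synchronous. */
		SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_tfm(req, tfm);
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
					      NULL, NULL);
		sg_init_one(&sg, buf, len);
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);
		err = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);	/* wipe key material off the stack */
	}

out_free:
	crypto_free_skcipher(tfm);
	return err;
}

Allocating with CRYPTO_ALG_ASYNC in the mask pins a synchronous implementation, which is what makes the on-stack request pattern safe.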
* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (116 commits)
crypto: qat - remove redundant arbiter configuration
crypto: ux500 - fix checks of error code returned by devm_ioremap_resource()
crypto: atmel - fix checks of error code returned by devm_ioremap_resource()
crypto: qat - Change the definition of icp_qat_uof_regtype
hwrng: exynos - use __maybe_unused to hide pm functions
crypto: ccp - Add abstraction for device-specific calls
crypto: ccp - CCP versioning support
crypto: ccp - Support for multiple CCPs
crypto: ccp - Remove check for x86 family and model
crypto: ccp - memset request context to zero during import
lib/mpi: use "static inline" instead of "extern inline"
lib/mpi: avoid assembler warning
hwrng: bcm63xx - fix non device tree compatibility
crypto: testmgr - allow rfc3686 aes-ctr variants in fips mode.
crypto: qat - The AE id should be less than the maximal AE number
lib/mpi: Endianness fix
crypto: rockchip - add hash support for crypto engine in rk3288
crypto: xts - fix compile errors
crypto: doc - add skcipher API documentation
crypto: doc - update AEAD AD handling
...
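Among these commits, the drbd conversion splits the old crypto_hash usage in two: the CRAM-HMAC handshake moves to crypto_shash with an on-stack descriptor (see drbd_do_auth() in the diff below), while the scatterlist-based checksum paths move to crypto_ahash. A rough sketch of the shash side, assuming the data sits in a flat buffer; the algorithm name and helper are hypothetical:

#include <crypto/hash.h>
#include <linux/err.h>

/* Hypothetical helper: HMAC over a flat buffer; 'out' must hold the digest. */
static int example_hmac_digest(const u8 *key, unsigned int keylen,
			       const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, keylen);
	if (!err) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		/* One call replaces the old init/update/final over a scatterlist. */
		err = crypto_shash_digest(desc, data, len, out);
		shash_desc_zero(desc);
	}

	crypto_free_shash(tfm);
	return err;
}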
Diffstat (limited to 'drivers')
69 files changed, 2678 insertions, 1540 deletions
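Where the data to hash lives in pages rather than a flat buffer — drbd_csum_bio() and drbd_csum_ee() in the diff below — the ahash interface keeps the scatterlist model. A minimal sketch of that pattern; the helper is hypothetical and assumes tfm was allocated with crypto_alloc_ahash(alg, 0, CRYPTO_ALG_ASYNC), i.e. is synchronous, so an on-stack request is safe:

#include <crypto/hash.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: digest 'len' bytes of one page into 'digest'. */
static int example_ahash_page(struct crypto_ahash *tfm, struct page *page,
			      unsigned int len, u8 *digest)
{
	AHASH_REQUEST_ON_STACK(req, tfm);
	struct scatterlist sg;
	int err;

	ahash_request_set_tfm(req, tfm);
	ahash_request_set_callback(req, 0, NULL, NULL);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, len, 0);

	err = crypto_ahash_init(req);
	if (!err) {
		/* finup() hashes the scatterlist and writes the final digest. */
		ahash_request_set_crypt(req, &sg, digest, len);
		err = crypto_ahash_finup(req);
	}
	ahash_request_zero(req);
	return err;
}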
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c
index 99e773cb70d0..3d31761c0ed0 100644
--- a/drivers/block/cryptoloop.c
+++ b/drivers/block/cryptoloop.c
@@ -21,9 +21,9 @@
 #include <linux/module.h>
+#include <crypto/skcipher.h>
 #include <linux/init.h>
 #include <linux/string.h>
-#include <linux/crypto.h>
 #include <linux/blkdev.h>
 #include <linux/scatterlist.h>
 #include <asm/uaccess.h>
@@ -46,7 +46,7 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
 	char *cipher;
 	char *mode;
 	char *cmsp = cms;	/* c-m string pointer */
-	struct crypto_blkcipher *tfm;
+	struct crypto_skcipher *tfm;

 	/* encryption breaks for non sector aligned offsets */
@@ -82,12 +82,12 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
 	*cmsp++ = ')';
 	*cmsp = 0;

-	tfm = crypto_alloc_blkcipher(cms, 0, CRYPTO_ALG_ASYNC);
+	tfm = crypto_alloc_skcipher(cms, 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(tfm))
 		return PTR_ERR(tfm);

-	err = crypto_blkcipher_setkey(tfm, info->lo_encrypt_key,
-				      info->lo_encrypt_key_size);
+	err = crypto_skcipher_setkey(tfm, info->lo_encrypt_key,
+				     info->lo_encrypt_key_size);
 	if (err != 0)
 		goto out_free_tfm;
@@ -96,17 +96,14 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
 	return 0;

 out_free_tfm:
-	crypto_free_blkcipher(tfm);
+	crypto_free_skcipher(tfm);

 out:
 	return err;
 }

-typedef int (*encdec_cbc_t)(struct blkcipher_desc *desc,
-			struct scatterlist *sg_out,
-			struct scatterlist *sg_in,
-			unsigned int nsg);
+typedef int (*encdec_cbc_t)(struct skcipher_request *req);

 static int
 cryptoloop_transfer(struct loop_device *lo, int cmd,
@@ -114,11 +111,8 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
 		    struct page *loop_page, unsigned loop_off,
 		    int size, sector_t IV)
 {
-	struct crypto_blkcipher *tfm = lo->key_data;
-	struct blkcipher_desc desc = {
-		.tfm = tfm,
-		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
-	};
+	struct crypto_skcipher *tfm = lo->key_data;
+	SKCIPHER_REQUEST_ON_STACK(req, tfm);
 	struct scatterlist sg_out;
 	struct scatterlist sg_in;
@@ -127,6 +121,10 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
 	unsigned in_offs, out_offs;
 	int err;

+	skcipher_request_set_tfm(req, tfm);
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
+				      NULL, NULL);
+
 	sg_init_table(&sg_out, 1);
 	sg_init_table(&sg_in, 1);
@@ -135,13 +133,13 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
 		in_offs = raw_off;
 		out_page = loop_page;
 		out_offs = loop_off;
-		encdecfunc = crypto_blkcipher_crt(tfm)->decrypt;
+		encdecfunc = crypto_skcipher_decrypt;
 	} else {
 		in_page = loop_page;
 		in_offs = loop_off;
 		out_page = raw_page;
 		out_offs = raw_off;
-		encdecfunc = crypto_blkcipher_crt(tfm)->encrypt;
+		encdecfunc = crypto_skcipher_encrypt;
 	}

 	while (size > 0) {
@@ -152,10 +150,10 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
 		sg_set_page(&sg_in, in_page, sz, in_offs);
 		sg_set_page(&sg_out, out_page, sz, out_offs);

-		desc.info = iv;
-		err = encdecfunc(&desc, &sg_out, &sg_in, sz);
+		skcipher_request_set_crypt(req, &sg_in, &sg_out, sz, iv);
+		err = encdecfunc(req);
 		if (err)
-			return err;
+			goto out;

 		IV++;
 		size -= sz;
@@ -163,7 +161,11 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
 		out_offs += sz;
 	}

-	return 0;
+	err = 0;
+
+out:
+	skcipher_request_zero(req);
+	return err;
 }

 static int
@@ -175,9 +177,9 @@ cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
 static int
 cryptoloop_release(struct loop_device *lo)
 {
-	struct crypto_blkcipher *tfm = lo->key_data;
+	struct crypto_skcipher *tfm = lo->key_data;

 	if (tfm != NULL)
{ - crypto_free_blkcipher(tfm); + crypto_free_skcipher(tfm); lo->key_data = NULL; return 0; } diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 34bc84efc29e..c227fd4cad75 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -26,13 +26,13 @@ #ifndef _DRBD_INT_H #define _DRBD_INT_H +#include <crypto/hash.h> #include <linux/compiler.h> #include <linux/types.h> #include <linux/list.h> #include <linux/sched.h> #include <linux/bitops.h> #include <linux/slab.h> -#include <linux/crypto.h> #include <linux/ratelimit.h> #include <linux/tcp.h> #include <linux/mutex.h> @@ -724,11 +724,11 @@ struct drbd_connection { struct list_head transfer_log; /* all requests not yet fully processed */ - struct crypto_hash *cram_hmac_tfm; - struct crypto_hash *integrity_tfm; /* checksums we compute, updates protected by connection->data->mutex */ - struct crypto_hash *peer_integrity_tfm; /* checksums we verify, only accessed from receiver thread */ - struct crypto_hash *csums_tfm; - struct crypto_hash *verify_tfm; + struct crypto_shash *cram_hmac_tfm; + struct crypto_ahash *integrity_tfm; /* checksums we compute, updates protected by connection->data->mutex */ + struct crypto_ahash *peer_integrity_tfm; /* checksums we verify, only accessed from receiver thread */ + struct crypto_ahash *csums_tfm; + struct crypto_ahash *verify_tfm; void *int_dig_in; void *int_dig_vv; @@ -1524,8 +1524,8 @@ static inline void ov_out_of_sync_print(struct drbd_device *device) } -extern void drbd_csum_bio(struct crypto_hash *, struct bio *, void *); -extern void drbd_csum_ee(struct crypto_hash *, struct drbd_peer_request *, void *); +extern void drbd_csum_bio(struct crypto_ahash *, struct bio *, void *); +extern void drbd_csum_ee(struct crypto_ahash *, struct drbd_peer_request *, void *); /* worker callbacks */ extern int w_e_end_data_req(struct drbd_work *, int); extern int w_e_end_rsdata_req(struct drbd_work *, int); diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 5b43dfb79819..fa209773d494 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1340,7 +1340,7 @@ void drbd_send_ack_dp(struct drbd_peer_device *peer_device, enum drbd_packet cmd struct p_data *dp, int data_size) { if (peer_device->connection->peer_integrity_tfm) - data_size -= crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm); + data_size -= crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm); _drbd_send_ack(peer_device, cmd, dp->sector, cpu_to_be32(data_size), dp->block_id); } @@ -1629,7 +1629,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request * sock = &peer_device->connection->data; p = drbd_prepare_command(peer_device, sock); digest_size = peer_device->connection->integrity_tfm ? - crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0; + crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0; if (!p) return -EIO; @@ -1718,7 +1718,7 @@ int drbd_send_block(struct drbd_peer_device *peer_device, enum drbd_packet cmd, p = drbd_prepare_command(peer_device, sock); digest_size = peer_device->connection->integrity_tfm ? 
- crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0; + crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0; if (!p) return -EIO; @@ -2498,11 +2498,11 @@ void conn_free_crypto(struct drbd_connection *connection) { drbd_free_sock(connection); - crypto_free_hash(connection->csums_tfm); - crypto_free_hash(connection->verify_tfm); - crypto_free_hash(connection->cram_hmac_tfm); - crypto_free_hash(connection->integrity_tfm); - crypto_free_hash(connection->peer_integrity_tfm); + crypto_free_ahash(connection->csums_tfm); + crypto_free_ahash(connection->verify_tfm); + crypto_free_shash(connection->cram_hmac_tfm); + crypto_free_ahash(connection->integrity_tfm); + crypto_free_ahash(connection->peer_integrity_tfm); kfree(connection->int_dig_in); kfree(connection->int_dig_vv); diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index c055c5e12f24..226eb0c9f0fb 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -2160,19 +2160,34 @@ check_net_options(struct drbd_connection *connection, struct net_conf *new_net_c } struct crypto { - struct crypto_hash *verify_tfm; - struct crypto_hash *csums_tfm; - struct crypto_hash *cram_hmac_tfm; - struct crypto_hash *integrity_tfm; + struct crypto_ahash *verify_tfm; + struct crypto_ahash *csums_tfm; + struct crypto_shash *cram_hmac_tfm; + struct crypto_ahash *integrity_tfm; }; static int -alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg) +alloc_shash(struct crypto_shash **tfm, char *tfm_name, int err_alg) { if (!tfm_name[0]) return NO_ERROR; - *tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC); + *tfm = crypto_alloc_shash(tfm_name, 0, 0); + if (IS_ERR(*tfm)) { + *tfm = NULL; + return err_alg; + } + + return NO_ERROR; +} + +static int +alloc_ahash(struct crypto_ahash **tfm, char *tfm_name, int err_alg) +{ + if (!tfm_name[0]) + return NO_ERROR; + + *tfm = crypto_alloc_ahash(tfm_name, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(*tfm)) { *tfm = NULL; return err_alg; @@ -2187,24 +2202,24 @@ alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf) char hmac_name[CRYPTO_MAX_ALG_NAME]; enum drbd_ret_code rv; - rv = alloc_hash(&crypto->csums_tfm, new_net_conf->csums_alg, - ERR_CSUMS_ALG); + rv = alloc_ahash(&crypto->csums_tfm, new_net_conf->csums_alg, + ERR_CSUMS_ALG); if (rv != NO_ERROR) return rv; - rv = alloc_hash(&crypto->verify_tfm, new_net_conf->verify_alg, - ERR_VERIFY_ALG); + rv = alloc_ahash(&crypto->verify_tfm, new_net_conf->verify_alg, + ERR_VERIFY_ALG); if (rv != NO_ERROR) return rv; - rv = alloc_hash(&crypto->integrity_tfm, new_net_conf->integrity_alg, - ERR_INTEGRITY_ALG); + rv = alloc_ahash(&crypto->integrity_tfm, new_net_conf->integrity_alg, + ERR_INTEGRITY_ALG); if (rv != NO_ERROR) return rv; if (new_net_conf->cram_hmac_alg[0] != 0) { snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", new_net_conf->cram_hmac_alg); - rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name, - ERR_AUTH_ALG); + rv = alloc_shash(&crypto->cram_hmac_tfm, hmac_name, + ERR_AUTH_ALG); } return rv; @@ -2212,10 +2227,10 @@ alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf) static void free_crypto(struct crypto *crypto) { - crypto_free_hash(crypto->cram_hmac_tfm); - crypto_free_hash(crypto->integrity_tfm); - crypto_free_hash(crypto->csums_tfm); - crypto_free_hash(crypto->verify_tfm); + crypto_free_shash(crypto->cram_hmac_tfm); + crypto_free_ahash(crypto->integrity_tfm); + crypto_free_ahash(crypto->csums_tfm); + crypto_free_ahash(crypto->verify_tfm); } int 
drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info) @@ -2292,23 +2307,23 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info) rcu_assign_pointer(connection->net_conf, new_net_conf); if (!rsr) { - crypto_free_hash(connection->csums_tfm); + crypto_free_ahash(connection->csums_tfm); connection->csums_tfm = crypto.csums_tfm; crypto.csums_tfm = NULL; } if (!ovr) { - crypto_free_hash(connection->verify_tfm); + crypto_free_ahash(connection->verify_tfm); connection->verify_tfm = crypto.verify_tfm; crypto.verify_tfm = NULL; } - crypto_free_hash(connection->integrity_tfm); + crypto_free_ahash(connection->integrity_tfm); connection->integrity_tfm = crypto.integrity_tfm; if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100) /* Do this without trying to take connection->data.mutex again. */ __drbd_send_protocol(connection, P_PROTOCOL_UPDATE); - crypto_free_hash(connection->cram_hmac_tfm); + crypto_free_shash(connection->cram_hmac_tfm); connection->cram_hmac_tfm = crypto.cram_hmac_tfm; mutex_unlock(&connection->resource->conf_update); diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 1957fe8601dc..050aaa1c0350 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -1627,7 +1627,7 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector, digest_size = 0; if (!trim && peer_device->connection->peer_integrity_tfm) { - digest_size = crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm); + digest_size = crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm); /* * FIXME: Receive the incoming digest into the receive buffer * here, together with its struct p_data? @@ -1741,7 +1741,7 @@ static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_req digest_size = 0; if (peer_device->connection->peer_integrity_tfm) { - digest_size = crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm); + digest_size = crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm); err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size); if (err) return err; @@ -3321,7 +3321,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in int p_proto, p_discard_my_data, p_two_primaries, cf; struct net_conf *nc, *old_net_conf, *new_net_conf = NULL; char integrity_alg[SHARED_SECRET_MAX] = ""; - struct crypto_hash *peer_integrity_tfm = NULL; + struct crypto_ahash *peer_integrity_tfm = NULL; void *int_dig_in = NULL, *int_dig_vv = NULL; p_proto = be32_to_cpu(p->protocol); @@ -3402,14 +3402,14 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in * change. 
*/ - peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC); + peer_integrity_tfm = crypto_alloc_ahash(integrity_alg, 0, CRYPTO_ALG_ASYNC); if (!peer_integrity_tfm) { drbd_err(connection, "peer data-integrity-alg %s not supported\n", integrity_alg); goto disconnect; } - hash_size = crypto_hash_digestsize(peer_integrity_tfm); + hash_size = crypto_ahash_digestsize(peer_integrity_tfm); int_dig_in = kmalloc(hash_size, GFP_KERNEL); int_dig_vv = kmalloc(hash_size, GFP_KERNEL); if (!(int_dig_in && int_dig_vv)) { @@ -3439,7 +3439,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in mutex_unlock(&connection->resource->conf_update); mutex_unlock(&connection->data.mutex); - crypto_free_hash(connection->peer_integrity_tfm); + crypto_free_ahash(connection->peer_integrity_tfm); kfree(connection->int_dig_in); kfree(connection->int_dig_vv); connection->peer_integrity_tfm = peer_integrity_tfm; @@ -3457,7 +3457,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in disconnect_rcu_unlock: rcu_read_unlock(); disconnect: - crypto_free_hash(peer_integrity_tfm); + crypto_free_ahash(peer_integrity_tfm); kfree(int_dig_in); kfree(int_dig_vv); conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD); @@ -3469,15 +3469,15 @@ disconnect: * return: NULL (alg name was "") * ERR_PTR(error) if something goes wrong * or the crypto hash ptr, if it worked out ok. */ -static struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_device *device, +static struct crypto_ahash *drbd_crypto_alloc_digest_safe(const struct drbd_device *device, const char *alg, const char *name) { - struct crypto_hash *tfm; + struct crypto_ahash *tfm; if (!alg[0]) return NULL; - tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC); + tfm = crypto_alloc_ahash(alg, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) { drbd_err(device, "Can not allocate \"%s\" as %s (reason: %ld)\n", alg, name, PTR_ERR(tfm)); @@ -3530,8 +3530,8 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i struct drbd_device *device; struct p_rs_param_95 *p; unsigned int header_size, data_size, exp_max_sz; - struct crypto_hash *verify_tfm = NULL; - struct crypto_hash *csums_tfm = NULL; + struct crypto_ahash *verify_tfm = NULL; + struct crypto_ahash *csums_tfm = NULL; struct net_conf *old_net_conf, *new_net_conf = NULL; struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL; const int apv = connection->agreed_pro_version; @@ -3678,14 +3678,14 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i if (verify_tfm) { strcpy(new_net_conf->verify_alg, p->verify_alg); new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1; - crypto_free_hash(peer_device->connection->verify_tfm); + crypto_free_ahash(peer_device->connection->verify_tfm); peer_device->connection->verify_tfm = verify_tfm; drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg); } if (csums_tfm) { strcpy(new_net_conf->csums_alg, p->csums_alg); new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1; - crypto_free_hash(peer_device->connection->csums_tfm); + crypto_free_ahash(peer_device->connection->csums_tfm); peer_device->connection->csums_tfm = csums_tfm; drbd_info(device, "using csums-alg: \"%s\"\n", p->csums_alg); } @@ -3729,9 +3729,9 @@ disconnect: mutex_unlock(&connection->resource->conf_update); /* just for completeness: actually not needed, * as this is not reached if csums_tfm was ok. 
*/ - crypto_free_hash(csums_tfm); + crypto_free_ahash(csums_tfm); /* but free the verify_tfm again, if csums_tfm did not work out */ - crypto_free_hash(verify_tfm); + crypto_free_ahash(verify_tfm); conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD); return -EIO; } @@ -4925,14 +4925,13 @@ static int drbd_do_auth(struct drbd_connection *connection) { struct drbd_socket *sock; char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */ - struct scatterlist sg; char *response = NULL; char *right_response = NULL; char *peers_ch = NULL; unsigned int key_len; char secret[SHARED_SECRET_MAX]; /* 64 byte */ unsigned int resp_size; - struct hash_desc desc; + SHASH_DESC_ON_STACK(desc, connection->cram_hmac_tfm); struct packet_info pi; struct net_conf *nc; int err, rv; @@ -4945,12 +4944,12 @@ static int drbd_do_auth(struct drbd_connection *connection) memcpy(secret, nc->shared_secret, key_len); rcu_read_unlock(); - desc.tfm = connection->cram_hmac_tfm; - desc.flags = 0; + desc->tfm = connection->cram_hmac_tfm; + desc->flags = 0; - rv = crypto_hash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len); + rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len); if (rv) { - drbd_err(connection, "crypto_hash_setkey() failed with %d\n", rv); + drbd_err(connection, "crypto_shash_setkey() failed with %d\n", rv); rv = -1; goto fail; } @@ -5011,7 +5010,7 @@ static int drbd_do_auth(struct drbd_connection *connection) goto fail; } - resp_size = crypto_hash_digestsize(connection->cram_hmac_tfm); + resp_size = crypto_shash_digestsize(connection->cram_hmac_tfm); response = kmalloc(resp_size, GFP_NOIO); if (response == NULL) { drbd_err(connection, "kmalloc of response failed\n"); @@ -5019,10 +5018,7 @@ static int drbd_do_auth(struct drbd_connection *connection) goto fail; } - sg_init_table(&sg, 1); - sg_set_buf(&sg, peers_ch, pi.size); - - rv = crypto_hash_digest(&desc, &sg, sg.length, response); + rv = crypto_shash_digest(desc, peers_ch, pi.size, response); if (rv) { drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv); rv = -1; @@ -5070,9 +5066,8 @@ static int drbd_do_auth(struct drbd_connection *connection) goto fail; } - sg_set_buf(&sg, my_challenge, CHALLENGE_LEN); - - rv = crypto_hash_digest(&desc, &sg, sg.length, right_response); + rv = crypto_shash_digest(desc, my_challenge, CHALLENGE_LEN, + right_response); if (rv) { drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv); rv = -1; @@ -5091,6 +5086,7 @@ static int drbd_do_auth(struct drbd_connection *connection) kfree(peers_ch); kfree(response); kfree(right_response); + shash_desc_zero(desc); return rv; } diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index eff716c27b1f..4d87499f0d54 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -274,51 +274,56 @@ void drbd_request_endio(struct bio *bio) complete_master_bio(device, &m); } -void drbd_csum_ee(struct crypto_hash *tfm, struct drbd_peer_request *peer_req, void *digest) +void drbd_csum_ee(struct crypto_ahash *tfm, struct drbd_peer_request *peer_req, void *digest) { - struct hash_desc desc; + AHASH_REQUEST_ON_STACK(req, tfm); struct scatterlist sg; struct page *page = peer_req->pages; struct page *tmp; unsigned len; - desc.tfm = tfm; - desc.flags = 0; + ahash_request_set_tfm(req, tfm); + ahash_request_set_callback(req, 0, NULL, NULL); sg_init_table(&sg, 1); - crypto_hash_init(&desc); + crypto_ahash_init(req); while ((tmp = page_chain_next(page))) { /* all but the last page 
will be fully used */ sg_set_page(&sg, page, PAGE_SIZE, 0); - crypto_hash_update(&desc, &sg, sg.length); + ahash_request_set_crypt(req, &sg, NULL, sg.length); + crypto_ahash_update(req); page = tmp; } /* and now the last, possibly only partially used page */ len = peer_req->i.size & (PAGE_SIZE - 1); sg_set_page(&sg, page, len ?: PAGE_SIZE, 0); - crypto_hash_update(&desc, &sg, sg.length); - crypto_hash_final(&desc, digest); + ahash_request_set_crypt(req, &sg, digest, sg.length); + crypto_ahash_finup(req); + ahash_request_zero(req); } -void drbd_csum_bio(struct crypto_hash *tfm, struct bio *bio, void *digest) +void drbd_csum_bio(struct crypto_ahash *tfm, struct bio *bio, void *digest) { - struct hash_desc desc; + AHASH_REQUEST_ON_STACK(req, tfm); struct scatterlist sg; struct bio_vec bvec; struct bvec_iter iter; - desc.tfm = tfm; - desc.flags = 0; + ahash_request_set_tfm(req, tfm); + ahash_request_set_callback(req, 0, NULL, NULL); sg_init_table(&sg, 1); - crypto_hash_init(&desc); + crypto_ahash_init(req); bio_for_each_segment(bvec, bio, iter) { sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset); - crypto_hash_update(&desc, &sg, sg.length); + ahash_request_set_crypt(req, &sg, NULL, sg.length); + crypto_ahash_update(req); } - crypto_hash_final(&desc, digest); + ahash_request_set_crypt(req, NULL, digest, 0); + crypto_ahash_final(req); + ahash_request_zero(req); } /* MAYBE merge common code with w_e_end_ov_req */ @@ -337,7 +342,7 @@ static int w_e_send_csum(struct drbd_work *w, int cancel) if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0)) goto out; - digest_size = crypto_hash_digestsize(peer_device->connection->csums_tfm); + digest_size = crypto_ahash_digestsize(peer_device->connection->csums_tfm); digest = kmalloc(digest_size, GFP_NOIO); if (digest) { sector_t sector = peer_req->i.sector; @@ -1113,7 +1118,7 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel) * a real fix would be much more involved, * introducing more locking mechanisms */ if (peer_device->connection->csums_tfm) { - digest_size = crypto_hash_digestsize(peer_device->connection->csums_tfm); + digest_size = crypto_ahash_digestsize(peer_device->connection->csums_tfm); D_ASSERT(device, digest_size == di->digest_size); digest = kmalloc(digest_size, GFP_NOIO); } @@ -1163,7 +1168,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel) if (unlikely(cancel)) goto out; - digest_size = crypto_hash_digestsize(peer_device->connection->verify_tfm); + digest_size = crypto_ahash_digestsize(peer_device->connection->verify_tfm); digest = kmalloc(digest_size, GFP_NOIO); if (!digest) { err = 1; /* terminate the connection in case the allocation failed */ @@ -1235,7 +1240,7 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel) di = peer_req->digest; if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { - digest_size = crypto_hash_digestsize(peer_device->connection->verify_tfm); + digest_size = crypto_ahash_digestsize(peer_device->connection->verify_tfm); digest = kmalloc(digest_size, GFP_NOIO); if (digest) { drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest); diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index ff00331bff49..67ee8b08ab53 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -77,7 +77,7 @@ config HW_RANDOM_ATMEL config HW_RANDOM_BCM63XX tristate "Broadcom BCM63xx Random Number Generator support" - depends on BCM63XX + depends on BCM63XX || BMIPS_GENERIC default HW_RANDOM ---help--- This driver provides kernel-side support for the 
Random Number @@ -382,6 +382,19 @@ config HW_RANDOM_STM32 If unsure, say N. +config HW_RANDOM_PIC32 + tristate "Microchip PIC32 Random Number Generator support" + depends on HW_RANDOM && MACH_PIC32 + default y + ---help--- + This driver provides kernel-side support for the Random Number + Generator hardware found on a PIC32. + + To compile this driver as a module, choose M here. the + module will be called pic32-rng. + + If unsure, say Y. + endif # HW_RANDOM config UML_RANDOM diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index 5ad397635128..f5a6fa7690e7 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -33,3 +33,4 @@ obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o +obj-$(CONFIG_HW_RANDOM_PIC32) += pic32-rng.o diff --git a/drivers/char/hw_random/bcm63xx-rng.c b/drivers/char/hw_random/bcm63xx-rng.c index 4b31f1387f37..ca9c40309757 100644 --- a/drivers/char/hw_random/bcm63xx-rng.c +++ b/drivers/char/hw_random/bcm63xx-rng.c @@ -79,10 +79,8 @@ static int bcm63xx_rng_data_read(struct hwrng *rng, u32 *data) static int bcm63xx_rng_probe(struct platform_device *pdev) { struct resource *r; - struct clk *clk; int ret; struct bcm63xx_rng_priv *priv; - struct hwrng *rng; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) { @@ -132,10 +130,19 @@ static int bcm63xx_rng_probe(struct platform_device *pdev) return 0; } +#ifdef CONFIG_OF +static const struct of_device_id bcm63xx_rng_of_match[] = { + { .compatible = "brcm,bcm6368-rng", }, + {}, +}; +MODULE_DEVICE_TABLE(of, bcm63xx_rng_of_match); +#endif + static struct platform_driver bcm63xx_rng_driver = { .probe = bcm63xx_rng_probe, .driver = { .name = "bcm63xx-rng", + .of_match_table = of_match_ptr(bcm63xx_rng_of_match), }, }; diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c index 30cf4623184f..ada081232528 100644 --- a/drivers/char/hw_random/exynos-rng.c +++ b/drivers/char/hw_random/exynos-rng.c @@ -144,8 +144,7 @@ static int exynos_rng_probe(struct platform_device *pdev) return devm_hwrng_register(&pdev->dev, &exynos_rng->rng); } -#ifdef CONFIG_PM -static int exynos_rng_runtime_suspend(struct device *dev) +static int __maybe_unused exynos_rng_runtime_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct exynos_rng *exynos_rng = platform_get_drvdata(pdev); @@ -155,7 +154,7 @@ static int exynos_rng_runtime_suspend(struct device *dev) return 0; } -static int exynos_rng_runtime_resume(struct device *dev) +static int __maybe_unused exynos_rng_runtime_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct exynos_rng *exynos_rng = platform_get_drvdata(pdev); @@ -163,12 +162,12 @@ static int exynos_rng_runtime_resume(struct device *dev) return clk_prepare_enable(exynos_rng->clk); } -static int exynos_rng_suspend(struct device *dev) +static int __maybe_unused exynos_rng_suspend(struct device *dev) { return pm_runtime_force_suspend(dev); } -static int exynos_rng_resume(struct device *dev) +static int __maybe_unused exynos_rng_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct exynos_rng *exynos_rng = platform_get_drvdata(pdev); @@ -180,7 +179,6 @@ static int exynos_rng_resume(struct device *dev) return exynos_rng_configure(exynos_rng); } -#endif static const struct dev_pm_ops exynos_rng_pm_ops = { 
SET_SYSTEM_SLEEP_PM_OPS(exynos_rng_suspend, exynos_rng_resume) diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c index 843d6f6aee7a..3b06c1d6cfb2 100644 --- a/drivers/char/hw_random/n2-drv.c +++ b/drivers/char/hw_random/n2-drv.c @@ -743,6 +743,16 @@ static const struct of_device_id n2rng_match[] = { .compatible = "SUNW,kt-rng", .data = (void *) 1, }, + { + .name = "random-number-generator", + .compatible = "ORCL,m4-rng", + .data = (void *) 1, + }, + { + .name = "random-number-generator", + .compatible = "ORCL,m7-rng", + .data = (void *) 1, + }, {}, }; MODULE_DEVICE_TABLE(of, n2rng_match); diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c new file mode 100644 index 000000000000..108897bea2d0 --- /dev/null +++ b/drivers/char/hw_random/pic32-rng.c @@ -0,0 +1,155 @@ +/* + * PIC32 RNG driver + * + * Joshua Henderson <joshua.henderson@microchip.com> + * Copyright (C) 2016 Microchip Technology Inc. All rights reserved. + * + * This program is free software; you can distribute it and/or modify it + * under the terms of the GNU General Public License (Version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include <linux/clk.h> +#include <linux/clkdev.h> +#include <linux/err.h> +#include <linux/hw_random.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#define RNGCON 0x04 +#define TRNGEN BIT(8) +#define PRNGEN BIT(9) +#define PRNGCONT BIT(10) +#define TRNGMOD BIT(11) +#define SEEDLOAD BIT(12) +#define RNGPOLY1 0x08 +#define RNGPOLY2 0x0C +#define RNGNUMGEN1 0x10 +#define RNGNUMGEN2 0x14 +#define RNGSEED1 0x18 +#define RNGSEED2 0x1C +#define RNGRCNT 0x20 +#define RCNT_MASK 0x7F + +struct pic32_rng { + void __iomem *base; + struct hwrng rng; + struct clk *clk; +}; + +/* + * The TRNG can generate up to 24Mbps. This is a timeout that should be safe + * enough given the instructions in the loop and that the TRNG may not always + * be at maximum rate. 
+ */ +#define RNG_TIMEOUT 500 + +static int pic32_rng_read(struct hwrng *rng, void *buf, size_t max, + bool wait) +{ + struct pic32_rng *priv = container_of(rng, struct pic32_rng, rng); + u64 *data = buf; + u32 t; + unsigned int timeout = RNG_TIMEOUT; + + if (max < 8) + return 0; + + do { + t = readl(priv->base + RNGRCNT) & RCNT_MASK; + if (t == 64) { + /* TRNG value comes through the seed registers */ + *data = ((u64)readl(priv->base + RNGSEED2) << 32) + + readl(priv->base + RNGSEED1); + return 8; + } + } while (wait && --timeout); + + return -EIO; +} + +static int pic32_rng_probe(struct platform_device *pdev) +{ + struct pic32_rng *priv; + struct resource *res; + u32 v; + int ret; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + /* enable TRNG in enhanced mode */ + v = TRNGEN | TRNGMOD; + writel(v, priv->base + RNGCON); + + priv->rng.name = pdev->name; + priv->rng.read = pic32_rng_read; + + ret = hwrng_register(&priv->rng); + if (ret) + goto err_register; + + platform_set_drvdata(pdev, priv); + + return 0; + +err_register: + clk_disable_unprepare(priv->clk); + return ret; +} + +static int pic32_rng_remove(struct platform_device *pdev) +{ + struct pic32_rng *rng = platform_get_drvdata(pdev); + + hwrng_unregister(&rng->rng); + writel(0, rng->base + RNGCON); + clk_disable_unprepare(rng->clk); + return 0; +} + +static const struct of_device_id pic32_rng_of_match[] = { + { .compatible = "microchip,pic32mzda-rng", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, pic32_rng_of_match); + +static struct platform_driver pic32_rng_driver = { + .probe = pic32_rng_probe, + .remove = pic32_rng_remove, + .driver = { + .name = "pic32-rng", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(pic32_rng_of_match), + }, +}; + +module_platform_driver(pic32_rng_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Joshua Henderson <joshua.henderson@microchip.com>"); +MODULE_DESCRIPTION("Microchip PIC32 RNG Driver"); diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 07d494276aad..477fffdb4f49 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -296,6 +296,7 @@ config CRYPTO_DEV_OMAP_AES depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP2PLUS select CRYPTO_AES select CRYPTO_BLKCIPHER + select CRYPTO_ENGINE help OMAP processors have AES module accelerator. Select this if you want to use the OMAP module for AES algorithms. @@ -487,7 +488,7 @@ config CRYPTO_DEV_IMGTEC_HASH config CRYPTO_DEV_SUN4I_SS tristate "Support for Allwinner Security System cryptographic accelerator" - depends on ARCH_SUNXI + depends on ARCH_SUNXI && !64BIT select CRYPTO_MD5 select CRYPTO_SHA1 select CRYPTO_AES @@ -507,6 +508,10 @@ config CRYPTO_DEV_ROCKCHIP depends on OF && ARCH_ROCKCHIP select CRYPTO_AES select CRYPTO_DES + select CRYPTO_MD5 + select CRYPTO_SHA1 + select CRYPTO_SHA256 + select CRYPTO_HASH select CRYPTO_BLKCIPHER help diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 3eb3f1279fb7..e3d40a8dfffb 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -369,12 +369,6 @@ static inline size_t atmel_aes_padlen(size_t len, size_t block_size) return len ? 
block_size - len : 0; } -static inline struct aead_request * -aead_request_cast(struct crypto_async_request *req) -{ - return container_of(req, struct aead_request, base); -} - static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx) { struct atmel_aes_dev *aes_dd = NULL; @@ -2085,9 +2079,9 @@ static int atmel_aes_probe(struct platform_device *pdev) } aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res); - if (!aes_dd->io_base) { + if (IS_ERR(aes_dd->io_base)) { dev_err(dev, "can't ioremap\n"); - err = -ENOMEM; + err = PTR_ERR(aes_dd->io_base); goto res_err; } diff --git a/drivers/crypto/atmel-sha-regs.h b/drivers/crypto/atmel-sha-regs.h index 83b2d7425666..e08897109cab 100644 --- a/drivers/crypto/atmel-sha-regs.h +++ b/drivers/crypto/atmel-sha-regs.h @@ -8,6 +8,8 @@ #define SHA_CR_START (1 << 0) #define SHA_CR_FIRST (1 << 4) #define SHA_CR_SWRST (1 << 8) +#define SHA_CR_WUIHV (1 << 12) +#define SHA_CR_WUIEHV (1 << 13) #define SHA_MR 0x04 #define SHA_MR_MODE_MASK (0x3 << 0) @@ -15,6 +17,8 @@ #define SHA_MR_MODE_AUTO 0x1 #define SHA_MR_MODE_PDC 0x2 #define SHA_MR_PROCDLY (1 << 4) +#define SHA_MR_UIHV (1 << 5) +#define SHA_MR_UIEHV (1 << 6) #define SHA_MR_ALGO_SHA1 (0 << 8) #define SHA_MR_ALGO_SHA256 (1 << 8) #define SHA_MR_ALGO_SHA384 (2 << 8) diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index 8bf9914d4d15..97e34799e077 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -53,6 +53,7 @@ #define SHA_FLAGS_FINUP BIT(16) #define SHA_FLAGS_SG BIT(17) +#define SHA_FLAGS_ALGO_MASK GENMASK(22, 18) #define SHA_FLAGS_SHA1 BIT(18) #define SHA_FLAGS_SHA224 BIT(19) #define SHA_FLAGS_SHA256 BIT(20) @@ -60,11 +61,12 @@ #define SHA_FLAGS_SHA512 BIT(22) #define SHA_FLAGS_ERROR BIT(23) #define SHA_FLAGS_PAD BIT(24) +#define SHA_FLAGS_RESTORE BIT(25) #define SHA_OP_UPDATE 1 #define SHA_OP_FINAL 2 -#define SHA_BUFFER_LEN PAGE_SIZE +#define SHA_BUFFER_LEN (PAGE_SIZE / 16) #define ATMEL_SHA_DMA_THRESHOLD 56 @@ -73,10 +75,15 @@ struct atmel_sha_caps { bool has_dualbuff; bool has_sha224; bool has_sha_384_512; + bool has_uihv; }; struct atmel_sha_dev; +/* + * .statesize = sizeof(struct atmel_sha_reqctx) must be <= PAGE_SIZE / 8 as + * tested by the ahash_prepare_alg() function. 
+ */ struct atmel_sha_reqctx { struct atmel_sha_dev *dd; unsigned long flags; @@ -95,7 +102,7 @@ struct atmel_sha_reqctx { size_t block_size; - u8 buffer[0] __aligned(sizeof(u32)); + u8 buffer[SHA_BUFFER_LEN + SHA512_BLOCK_SIZE] __aligned(sizeof(u32)); }; struct atmel_sha_ctx { @@ -122,6 +129,7 @@ struct atmel_sha_dev { spinlock_t lock; int err; struct tasklet_struct done_task; + struct tasklet_struct queue_task; unsigned long flags; struct crypto_queue queue; @@ -317,7 +325,8 @@ static int atmel_sha_init(struct ahash_request *req) static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma) { struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); - u32 valcr = 0, valmr = SHA_MR_MODE_AUTO; + u32 valmr = SHA_MR_MODE_AUTO; + unsigned int i, hashsize = 0; if (likely(dma)) { if (!dd->caps.has_dma) @@ -329,22 +338,62 @@ static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma) atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY); } - if (ctx->flags & SHA_FLAGS_SHA1) + switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { + case SHA_FLAGS_SHA1: valmr |= SHA_MR_ALGO_SHA1; - else if (ctx->flags & SHA_FLAGS_SHA224) + hashsize = SHA1_DIGEST_SIZE; + break; + + case SHA_FLAGS_SHA224: valmr |= SHA_MR_ALGO_SHA224; - else if (ctx->flags & SHA_FLAGS_SHA256) + hashsize = SHA256_DIGEST_SIZE; + break; + + case SHA_FLAGS_SHA256: valmr |= SHA_MR_ALGO_SHA256; - else if (ctx->flags & SHA_FLAGS_SHA384) + hashsize = SHA256_DIGEST_SIZE; + break; + + case SHA_FLAGS_SHA384: valmr |= SHA_MR_ALGO_SHA384; - else if (ctx->flags & SHA_FLAGS_SHA512) + hashsize = SHA512_DIGEST_SIZE; + break; + + case SHA_FLAGS_SHA512: valmr |= SHA_MR_ALGO_SHA512; + hashsize = SHA512_DIGEST_SIZE; + break; + + default: + break; + } /* Setting CR_FIRST only for the first iteration */ - if (!(ctx->digcnt[0] || ctx->digcnt[1])) - valcr = SHA_CR_FIRST; + if (!(ctx->digcnt[0] || ctx->digcnt[1])) { + atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST); + } else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) { + const u32 *hash = (const u32 *)ctx->digest; + + /* + * Restore the hardware context: update the User Initialize + * Hash Value (UIHV) with the value saved when the latest + * 'update' operation completed on this very same crypto + * request. + */ + ctx->flags &= ~SHA_FLAGS_RESTORE; + atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV); + for (i = 0; i < hashsize / sizeof(u32); ++i) + atmel_sha_write(dd, SHA_REG_DIN(i), hash[i]); + atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST); + valmr |= SHA_MR_UIHV; + } + /* + * WARNING: If the UIHV feature is not available, the hardware CANNOT + * process concurrent requests: the internal registers used to store + * the hash/digest are still set to the partial digest output values + * computed during the latest round. 
+ */ - atmel_sha_write(dd, SHA_CR, valcr); atmel_sha_write(dd, SHA_MR, valmr); } @@ -713,23 +762,31 @@ static void atmel_sha_copy_hash(struct ahash_request *req) { struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); u32 *hash = (u32 *)ctx->digest; - int i; + unsigned int i, hashsize; - if (ctx->flags & SHA_FLAGS_SHA1) - for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) - hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); - else if (ctx->flags & SHA_FLAGS_SHA224) - for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(u32); i++) - hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); - else if (ctx->flags & SHA_FLAGS_SHA256) - for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++) - hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); - else if (ctx->flags & SHA_FLAGS_SHA384) - for (i = 0; i < SHA384_DIGEST_SIZE / sizeof(u32); i++) - hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); - else - for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(u32); i++) - hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); + switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { + case SHA_FLAGS_SHA1: + hashsize = SHA1_DIGEST_SIZE; + break; + + case SHA_FLAGS_SHA224: + case SHA_FLAGS_SHA256: + hashsize = SHA256_DIGEST_SIZE; + break; + + case SHA_FLAGS_SHA384: + case SHA_FLAGS_SHA512: + hashsize = SHA512_DIGEST_SIZE; + break; + + default: + /* Should not happen... */ + return; + } + + for (i = 0; i < hashsize / sizeof(u32); ++i) + hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); + ctx->flags |= SHA_FLAGS_RESTORE; } static void atmel_sha_copy_ready_hash(struct ahash_request *req) @@ -788,7 +845,7 @@ static void atmel_sha_finish_req(struct ahash_request *req, int err) req->base.complete(&req->base, err); /* handle new request */ - tasklet_schedule(&dd->done_task); + tasklet_schedule(&dd->queue_task); } static int atmel_sha_hw_init(struct atmel_sha_dev *dd) @@ -922,36 +979,17 @@ static int atmel_sha_update(struct ahash_request *req) static int atmel_sha_final(struct ahash_request *req) { struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); - struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm); - struct atmel_sha_dev *dd = tctx->dd; - - int err = 0; ctx->flags |= SHA_FLAGS_FINUP; if (ctx->flags & SHA_FLAGS_ERROR) return 0; /* uncompleted hash is not needed */ - if (ctx->bufcnt) { - return atmel_sha_enqueue(req, SHA_OP_FINAL); - } else if (!(ctx->flags & SHA_FLAGS_PAD)) { /* add padding */ - err = atmel_sha_hw_init(dd); - if (err) - goto err1; - - dd->flags |= SHA_FLAGS_BUSY; - err = atmel_sha_final_req(dd); - } else { + if (ctx->flags & SHA_FLAGS_PAD) /* copy ready hash (+ finalize hmac) */ return atmel_sha_finish(req); - } - -err1: - if (err != -EINPROGRESS) - /* done_task will not finish it, so do it here */ - atmel_sha_finish_req(req, err); - return err; + return atmel_sha_enqueue(req, SHA_OP_FINAL); } static int atmel_sha_finup(struct ahash_request *req) @@ -979,11 +1017,27 @@ static int atmel_sha_digest(struct ahash_request *req) return atmel_sha_init(req) ?: atmel_sha_finup(req); } + +static int atmel_sha_export(struct ahash_request *req, void *out) +{ + const struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); + + memcpy(out, ctx, sizeof(*ctx)); + return 0; +} + +static int atmel_sha_import(struct ahash_request *req, const void *in) +{ + struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); + + memcpy(ctx, in, sizeof(*ctx)); + return 0; +} + static int atmel_sha_cra_init(struct crypto_tfm *tfm) { crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct atmel_sha_reqctx) + - SHA_BUFFER_LEN + 
SHA512_BLOCK_SIZE); + sizeof(struct atmel_sha_reqctx)); return 0; } @@ -995,8 +1049,11 @@ static struct ahash_alg sha_1_256_algs[] = { .final = atmel_sha_final, .finup = atmel_sha_finup, .digest = atmel_sha_digest, + .export = atmel_sha_export, + .import = atmel_sha_import, .halg = { .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct atmel_sha_reqctx), .base = { .cra_name = "sha1", .cra_driver_name = "atmel-sha1", @@ -1016,8 +1073,11 @@ static struct ahash_alg sha_1_256_algs[] = { .final = atmel_sha_final, .finup = atmel_sha_finup, .digest = atmel_sha_digest, + .export = atmel_sha_export, + .import = atmel_sha_import, .halg = { .digestsize = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct atmel_sha_reqctx), .base = { .cra_name = "sha256", .cra_driver_name = "atmel-sha256", @@ -1039,8 +1099,11 @@ static struct ahash_alg sha_224_alg = { .final = atmel_sha_final, .finup = atmel_sha_finup, .digest = atmel_sha_digest, + .export = atmel_sha_export, + .import = atmel_sha_import, .halg = { .digestsize = SHA224_DIGEST_SIZE, + .statesize = sizeof(struct atmel_sha_reqctx), .base = { .cra_name = "sha224", .cra_driver_name = "atmel-sha224", @@ -1062,8 +1125,11 @@ static struct ahash_alg sha_384_512_algs[] = { .final = atmel_sha_final, .finup = atmel_sha_finup, .digest = atmel_sha_digest, + .export = atmel_sha_export, + .import = atmel_sha_import, .halg = { .digestsize = SHA384_DIGEST_SIZE, + .statesize = sizeof(struct atmel_sha_reqctx), .base = { .cra_name = "sha384", .cra_driver_name = "atmel-sha384", @@ -1083,8 +1149,11 @@ static struct ahash_alg sha_384_512_algs[] = { .final = atmel_sha_final, .finup = atmel_sha_finup, .digest = atmel_sha_digest, + .export = atmel_sha_export, + .import = atmel_sha_import, .halg = { .digestsize = SHA512_DIGEST_SIZE, + .statesize = sizeof(struct atmel_sha_reqctx), .base = { .cra_name = "sha512", .cra_driver_name = "atmel-sha512", @@ -1100,16 +1169,18 @@ static struct ahash_alg sha_384_512_algs[] = { }, }; +static void atmel_sha_queue_task(unsigned long data) +{ + struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data; + + atmel_sha_handle_queue(dd, NULL); +} + static void atmel_sha_done_task(unsigned long data) { struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data; int err = 0; - if (!(SHA_FLAGS_BUSY & dd->flags)) { - atmel_sha_handle_queue(dd, NULL); - return; - } - if (SHA_FLAGS_CPU & dd->flags) { if (SHA_FLAGS_OUTPUT_READY & dd->flags) { dd->flags &= ~SHA_FLAGS_OUTPUT_READY; @@ -1272,14 +1343,23 @@ static void atmel_sha_get_cap(struct atmel_sha_dev *dd) dd->caps.has_dualbuff = 0; dd->caps.has_sha224 = 0; dd->caps.has_sha_384_512 = 0; + dd->caps.has_uihv = 0; /* keep only major version number */ switch (dd->hw_version & 0xff0) { + case 0x510: + dd->caps.has_dma = 1; + dd->caps.has_dualbuff = 1; + dd->caps.has_sha224 = 1; + dd->caps.has_sha_384_512 = 1; + dd->caps.has_uihv = 1; + break; case 0x420: dd->caps.has_dma = 1; dd->caps.has_dualbuff = 1; dd->caps.has_sha224 = 1; dd->caps.has_sha_384_512 = 1; + dd->caps.has_uihv = 1; break; case 0x410: dd->caps.has_dma = 1; @@ -1366,6 +1446,8 @@ static int atmel_sha_probe(struct platform_device *pdev) tasklet_init(&sha_dd->done_task, atmel_sha_done_task, (unsigned long)sha_dd); + tasklet_init(&sha_dd->queue_task, atmel_sha_queue_task, + (unsigned long)sha_dd); crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH); @@ -1404,9 +1486,9 @@ static int atmel_sha_probe(struct platform_device *pdev) } sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res); - if (!sha_dd->io_base) { + if 
(IS_ERR(sha_dd->io_base)) { dev_err(dev, "can't ioremap\n"); - err = -ENOMEM; + err = PTR_ERR(sha_dd->io_base); goto res_err; } @@ -1464,6 +1546,7 @@ err_sha_dma: iclk_unprepare: clk_unprepare(sha_dd->iclk); res_err: + tasklet_kill(&sha_dd->queue_task); tasklet_kill(&sha_dd->done_task); sha_dd_err: dev_err(dev, "initialization failed.\n"); @@ -1484,6 +1567,7 @@ static int atmel_sha_remove(struct platform_device *pdev) atmel_sha_unregister_algs(sha_dd); + tasklet_kill(&sha_dd->queue_task); tasklet_kill(&sha_dd->done_task); if (sha_dd->caps.has_dma) diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c index 2c7a628d0375..bf467d7be35c 100644 --- a/drivers/crypto/atmel-tdes.c +++ b/drivers/crypto/atmel-tdes.c @@ -1417,9 +1417,9 @@ static int atmel_tdes_probe(struct platform_device *pdev) } tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res); - if (!tdes_dd->io_base) { + if (IS_ERR(tdes_dd->io_base)) { dev_err(dev, "can't ioremap\n"); - err = -ENOMEM; + err = PTR_ERR(tdes_dd->io_base); goto res_err; } diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 69d4a1326fee..44d30b45f3cc 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -534,7 +534,7 @@ static int caam_probe(struct platform_device *pdev) * long pointers in master configuration register */ clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH | - MCFGR_AWCACHE_BUFF | MCFGR_WDENABLE | + MCFGR_AWCACHE_BUFF | MCFGR_WDENABLE | MCFGR_LARGE_BURST | (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0)); /* diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index f7e0d8d4c3da..6fd63a600614 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -65,7 +65,7 @@ static int caam_reset_hw_jr(struct device *dev) /* * Shutdown JobR independent of platform property code */ -int caam_jr_shutdown(struct device *dev) +static int caam_jr_shutdown(struct device *dev) { struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); dma_addr_t inpbusaddr, outbusaddr; diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index a8a79975682f..0ba9c40597dc 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -455,7 +455,8 @@ struct caam_ctrl { #define MCFGR_AXIPIPE_MASK (0xf << MCFGR_AXIPIPE_SHIFT) #define MCFGR_AXIPRI 0x00000008 /* Assert AXI priority sideband */ -#define MCFGR_BURST_64 0x00000001 /* Max burst size */ +#define MCFGR_LARGE_BURST 0x00000004 /* 128/256-byte burst size */ +#define MCFGR_BURST_64 0x00000001 /* 64-byte burst size */ /* JRSTART register offsets */ #define JRSTART_JR0_START 0x00000001 /* Start Job ring 0 */ diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 55a1f3951578..b750592cc936 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -1,5 +1,5 @@ obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o -ccp-objs := ccp-dev.o ccp-ops.o ccp-platform.o +ccp-objs := ccp-dev.o ccp-ops.o ccp-dev-v3.o ccp-platform.o ccp-$(CONFIG_PCI) += ccp-pci.o obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c index d89f20c04266..3d9acc53d247 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c @@ -220,6 +220,39 @@ static int ccp_aes_cmac_digest(struct ahash_request *req) return ccp_aes_cmac_finup(req); } +static int ccp_aes_cmac_export(struct ahash_request *req, void *out) +{ + struct ccp_aes_cmac_req_ctx *rctx = 
ahash_request_ctx(req); + struct ccp_aes_cmac_exp_ctx state; + + state.null_msg = rctx->null_msg; + memcpy(state.iv, rctx->iv, sizeof(state.iv)); + state.buf_count = rctx->buf_count; + memcpy(state.buf, rctx->buf, sizeof(state.buf)); + + /* 'out' may not be aligned so memcpy from local variable */ + memcpy(out, &state, sizeof(state)); + + return 0; +} + +static int ccp_aes_cmac_import(struct ahash_request *req, const void *in) +{ + struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req); + struct ccp_aes_cmac_exp_ctx state; + + /* 'in' may not be aligned so memcpy to local variable */ + memcpy(&state, in, sizeof(state)); + + memset(rctx, 0, sizeof(*rctx)); + rctx->null_msg = state.null_msg; + memcpy(rctx->iv, state.iv, sizeof(rctx->iv)); + rctx->buf_count = state.buf_count; + memcpy(rctx->buf, state.buf, sizeof(rctx->buf)); + + return 0; +} + static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int key_len) { @@ -352,10 +385,13 @@ int ccp_register_aes_cmac_algs(struct list_head *head) alg->final = ccp_aes_cmac_final; alg->finup = ccp_aes_cmac_finup; alg->digest = ccp_aes_cmac_digest; + alg->export = ccp_aes_cmac_export; + alg->import = ccp_aes_cmac_import; alg->setkey = ccp_aes_cmac_setkey; halg = &alg->halg; halg->digestsize = AES_BLOCK_SIZE; + halg->statesize = sizeof(struct ccp_aes_cmac_exp_ctx); base = &halg->base; snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)"); diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c index 7984f910884d..89291c15015c 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes.c +++ b/drivers/crypto/ccp/ccp-crypto-aes.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) AES crypto API support * - * Copyright (C) 2013 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. 
* * Author: Tom Lendacky <thomas.lendacky@amd.com> * @@ -259,6 +259,7 @@ static struct crypto_alg ccp_aes_rfc3686_defaults = { struct ccp_aes_def { enum ccp_aes_mode mode; + unsigned int version; const char *name; const char *driver_name; unsigned int blocksize; @@ -269,6 +270,7 @@ struct ccp_aes_def { static struct ccp_aes_def aes_algs[] = { { .mode = CCP_AES_MODE_ECB, + .version = CCP_VERSION(3, 0), .name = "ecb(aes)", .driver_name = "ecb-aes-ccp", .blocksize = AES_BLOCK_SIZE, @@ -277,6 +279,7 @@ static struct ccp_aes_def aes_algs[] = { }, { .mode = CCP_AES_MODE_CBC, + .version = CCP_VERSION(3, 0), .name = "cbc(aes)", .driver_name = "cbc-aes-ccp", .blocksize = AES_BLOCK_SIZE, @@ -285,6 +288,7 @@ static struct ccp_aes_def aes_algs[] = { }, { .mode = CCP_AES_MODE_CFB, + .version = CCP_VERSION(3, 0), .name = "cfb(aes)", .driver_name = "cfb-aes-ccp", .blocksize = AES_BLOCK_SIZE, @@ -293,6 +297,7 @@ static struct ccp_aes_def aes_algs[] = { }, { .mode = CCP_AES_MODE_OFB, + .version = CCP_VERSION(3, 0), .name = "ofb(aes)", .driver_name = "ofb-aes-ccp", .blocksize = 1, @@ -301,6 +306,7 @@ static struct ccp_aes_def aes_algs[] = { }, { .mode = CCP_AES_MODE_CTR, + .version = CCP_VERSION(3, 0), .name = "ctr(aes)", .driver_name = "ctr-aes-ccp", .blocksize = 1, @@ -309,6 +315,7 @@ static struct ccp_aes_def aes_algs[] = { }, { .mode = CCP_AES_MODE_CTR, + .version = CCP_VERSION(3, 0), .name = "rfc3686(ctr(aes))", .driver_name = "rfc3686-ctr-aes-ccp", .blocksize = 1, @@ -357,8 +364,11 @@ static int ccp_register_aes_alg(struct list_head *head, int ccp_register_aes_algs(struct list_head *head) { int i, ret; + unsigned int ccpversion = ccp_version(); for (i = 0; i < ARRAY_SIZE(aes_algs); i++) { + if (aes_algs[i].version > ccpversion) + continue; ret = ccp_register_aes_alg(head, &aes_algs[i]); if (ret) return ret; diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c index d14b3f28e010..b5ad72897dc2 100644 --- a/drivers/crypto/ccp/ccp-crypto-sha.c +++ b/drivers/crypto/ccp/ccp-crypto-sha.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) SHA crypto API support * - * Copyright (C) 2013 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. 
* * Author: Tom Lendacky <thomas.lendacky@amd.com> * @@ -207,6 +207,43 @@ static int ccp_sha_digest(struct ahash_request *req) return ccp_sha_finup(req); } +static int ccp_sha_export(struct ahash_request *req, void *out) +{ + struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); + struct ccp_sha_exp_ctx state; + + state.type = rctx->type; + state.msg_bits = rctx->msg_bits; + state.first = rctx->first; + memcpy(state.ctx, rctx->ctx, sizeof(state.ctx)); + state.buf_count = rctx->buf_count; + memcpy(state.buf, rctx->buf, sizeof(state.buf)); + + /* 'out' may not be aligned so memcpy from local variable */ + memcpy(out, &state, sizeof(state)); + + return 0; +} + +static int ccp_sha_import(struct ahash_request *req, const void *in) +{ + struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); + struct ccp_sha_exp_ctx state; + + /* 'in' may not be aligned so memcpy to local variable */ + memcpy(&state, in, sizeof(state)); + + memset(rctx, 0, sizeof(*rctx)); + rctx->type = state.type; + rctx->msg_bits = state.msg_bits; + rctx->first = state.first; + memcpy(rctx->ctx, state.ctx, sizeof(rctx->ctx)); + rctx->buf_count = state.buf_count; + memcpy(rctx->buf, state.buf, sizeof(rctx->buf)); + + return 0; +} + static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int key_len) { @@ -304,6 +341,7 @@ static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm) } struct ccp_sha_def { + unsigned int version; const char *name; const char *drv_name; enum ccp_sha_type type; @@ -313,6 +351,7 @@ struct ccp_sha_def { static struct ccp_sha_def sha_algs[] = { { + .version = CCP_VERSION(3, 0), .name = "sha1", .drv_name = "sha1-ccp", .type = CCP_SHA_TYPE_1, @@ -320,6 +359,7 @@ static struct ccp_sha_def sha_algs[] = { .block_size = SHA1_BLOCK_SIZE, }, { + .version = CCP_VERSION(3, 0), .name = "sha224", .drv_name = "sha224-ccp", .type = CCP_SHA_TYPE_224, @@ -327,6 +367,7 @@ static struct ccp_sha_def sha_algs[] = { .block_size = SHA224_BLOCK_SIZE, }, { + .version = CCP_VERSION(3, 0), .name = "sha256", .drv_name = "sha256-ccp", .type = CCP_SHA_TYPE_256, @@ -403,9 +444,12 @@ static int ccp_register_sha_alg(struct list_head *head, alg->final = ccp_sha_final; alg->finup = ccp_sha_finup; alg->digest = ccp_sha_digest; + alg->export = ccp_sha_export; + alg->import = ccp_sha_import; halg = &alg->halg; halg->digestsize = def->digest_size; + halg->statesize = sizeof(struct ccp_sha_exp_ctx); base = &halg->base; snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); @@ -440,8 +484,11 @@ static int ccp_register_sha_alg(struct list_head *head, int ccp_register_sha_algs(struct list_head *head) { int i, ret; + unsigned int ccpversion = ccp_version(); for (i = 0; i < ARRAY_SIZE(sha_algs); i++) { + if (sha_algs[i].version > ccpversion) + continue; ret = ccp_register_sha_alg(head, &sha_algs[i]); if (ret) return ret; diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h index 76a96f0f44c6..a326ec20bfa8 100644 --- a/drivers/crypto/ccp/ccp-crypto.h +++ b/drivers/crypto/ccp/ccp-crypto.h @@ -129,6 +129,15 @@ struct ccp_aes_cmac_req_ctx { struct ccp_cmd cmd; }; +struct ccp_aes_cmac_exp_ctx { + unsigned int null_msg; + + u8 iv[AES_BLOCK_SIZE]; + + unsigned int buf_count; + u8 buf[AES_BLOCK_SIZE]; +}; + /***** SHA related defines *****/ #define MAX_SHA_CONTEXT_SIZE SHA256_DIGEST_SIZE #define MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE @@ -171,6 +180,19 @@ struct ccp_sha_req_ctx { struct ccp_cmd cmd; }; +struct ccp_sha_exp_ctx { + enum ccp_sha_type type; + + u64 msg_bits; + + unsigned int first; + + u8 
ctx[MAX_SHA_CONTEXT_SIZE]; + + unsigned int buf_count; + u8 buf[MAX_SHA_BLOCK_SIZE]; +}; + /***** Common Context Structure *****/ struct ccp_ctx { int (*complete)(struct crypto_async_request *req, int ret); diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c new file mode 100644 index 000000000000..7d5eab49179e --- /dev/null +++ b/drivers/crypto/ccp/ccp-dev-v3.c @@ -0,0 +1,533 @@ +/* + * AMD Cryptographic Coprocessor (CCP) driver + * + * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. + * + * Author: Tom Lendacky <thomas.lendacky@amd.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/kthread.h> +#include <linux/interrupt.h> +#include <linux/ccp.h> + +#include "ccp-dev.h" + +static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count) +{ + struct ccp_cmd_queue *cmd_q = op->cmd_q; + struct ccp_device *ccp = cmd_q->ccp; + void __iomem *cr_addr; + u32 cr0, cmd; + unsigned int i; + int ret = 0; + + /* We could read a status register to see how many free slots + * are actually available, but reading that register resets it + * and you could lose some error information. + */ + cmd_q->free_slots--; + + cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT) + | (op->jobid << REQ0_JOBID_SHIFT) + | REQ0_WAIT_FOR_WRITE; + + if (op->soc) + cr0 |= REQ0_STOP_ON_COMPLETE + | REQ0_INT_ON_COMPLETE; + + if (op->ioc || !cmd_q->free_slots) + cr0 |= REQ0_INT_ON_COMPLETE; + + /* Start at CMD_REQ1 */ + cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR; + + mutex_lock(&ccp->req_mutex); + + /* Write CMD_REQ1 through CMD_REQx first */ + for (i = 0; i < cr_count; i++, cr_addr += CMD_REQ_INCR) + iowrite32(*(cr + i), cr_addr); + + /* Tell the CCP to start */ + wmb(); + iowrite32(cr0, ccp->io_regs + CMD_REQ0); + + mutex_unlock(&ccp->req_mutex); + + if (cr0 & REQ0_INT_ON_COMPLETE) { + /* Wait for the job to complete */ + ret = wait_event_interruptible(cmd_q->int_queue, + cmd_q->int_rcvd); + if (ret || cmd_q->cmd_error) { + /* On error delete all related jobs from the queue */ + cmd = (cmd_q->id << DEL_Q_ID_SHIFT) + | op->jobid; + + iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB); + + if (!ret) + ret = -EIO; + } else if (op->soc) { + /* Delete just head job from the queue on SoC */ + cmd = DEL_Q_ACTIVE + | (cmd_q->id << DEL_Q_ID_SHIFT) + | op->jobid; + + iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB); + } + + cmd_q->free_slots = CMD_Q_DEPTH(cmd_q->q_status); + + cmd_q->int_rcvd = 0; + } + + return ret; +} + +static int ccp_perform_aes(struct ccp_op *op) +{ + u32 cr[6]; + + /* Fill out the register contents for REQ1 through REQ6 */ + cr[0] = (CCP_ENGINE_AES << REQ1_ENGINE_SHIFT) + | (op->u.aes.type << REQ1_AES_TYPE_SHIFT) + | (op->u.aes.mode << REQ1_AES_MODE_SHIFT) + | (op->u.aes.action << REQ1_AES_ACTION_SHIFT) + | (op->ksb_key << REQ1_KEY_KSB_SHIFT); + cr[1] = op->src.u.dma.length - 1; + cr[2] = ccp_addr_lo(&op->src.u.dma); + cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT) + | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) + | ccp_addr_hi(&op->src.u.dma); + cr[4] = ccp_addr_lo(&op->dst.u.dma); + cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT) + | ccp_addr_hi(&op->dst.u.dma); + + if (op->u.aes.mode == CCP_AES_MODE_CFB) + cr[0] |= ((0x7f) << REQ1_AES_CFB_SIZE_SHIFT); + + if (op->eom) + cr[0] |= REQ1_EOM; + + if (op->init) + cr[0] |= REQ1_INIT; + + return 
ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); +} + +static int ccp_perform_xts_aes(struct ccp_op *op) +{ + u32 cr[6]; + + /* Fill out the register contents for REQ1 through REQ6 */ + cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT) + | (op->u.xts.action << REQ1_AES_ACTION_SHIFT) + | (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT) + | (op->ksb_key << REQ1_KEY_KSB_SHIFT); + cr[1] = op->src.u.dma.length - 1; + cr[2] = ccp_addr_lo(&op->src.u.dma); + cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT) + | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) + | ccp_addr_hi(&op->src.u.dma); + cr[4] = ccp_addr_lo(&op->dst.u.dma); + cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT) + | ccp_addr_hi(&op->dst.u.dma); + + if (op->eom) + cr[0] |= REQ1_EOM; + + if (op->init) + cr[0] |= REQ1_INIT; + + return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); +} + +static int ccp_perform_sha(struct ccp_op *op) +{ + u32 cr[6]; + + /* Fill out the register contents for REQ1 through REQ6 */ + cr[0] = (CCP_ENGINE_SHA << REQ1_ENGINE_SHIFT) + | (op->u.sha.type << REQ1_SHA_TYPE_SHIFT) + | REQ1_INIT; + cr[1] = op->src.u.dma.length - 1; + cr[2] = ccp_addr_lo(&op->src.u.dma); + cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT) + | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) + | ccp_addr_hi(&op->src.u.dma); + + if (op->eom) { + cr[0] |= REQ1_EOM; + cr[4] = lower_32_bits(op->u.sha.msg_bits); + cr[5] = upper_32_bits(op->u.sha.msg_bits); + } else { + cr[4] = 0; + cr[5] = 0; + } + + return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); +} + +static int ccp_perform_rsa(struct ccp_op *op) +{ + u32 cr[6]; + + /* Fill out the register contents for REQ1 through REQ6 */ + cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT) + | (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT) + | (op->ksb_key << REQ1_KEY_KSB_SHIFT) + | REQ1_EOM; + cr[1] = op->u.rsa.input_len - 1; + cr[2] = ccp_addr_lo(&op->src.u.dma); + cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT) + | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) + | ccp_addr_hi(&op->src.u.dma); + cr[4] = ccp_addr_lo(&op->dst.u.dma); + cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT) + | ccp_addr_hi(&op->dst.u.dma); + + return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); +} + +static int ccp_perform_passthru(struct ccp_op *op) +{ + u32 cr[6]; + + /* Fill out the register contents for REQ1 through REQ6 */ + cr[0] = (CCP_ENGINE_PASSTHRU << REQ1_ENGINE_SHIFT) + | (op->u.passthru.bit_mod << REQ1_PT_BW_SHIFT) + | (op->u.passthru.byte_swap << REQ1_PT_BS_SHIFT); + + if (op->src.type == CCP_MEMTYPE_SYSTEM) + cr[1] = op->src.u.dma.length - 1; + else + cr[1] = op->dst.u.dma.length - 1; + + if (op->src.type == CCP_MEMTYPE_SYSTEM) { + cr[2] = ccp_addr_lo(&op->src.u.dma); + cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) + | ccp_addr_hi(&op->src.u.dma); + + if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP) + cr[3] |= (op->ksb_key << REQ4_KSB_SHIFT); + } else { + cr[2] = op->src.u.ksb * CCP_KSB_BYTES; + cr[3] = (CCP_MEMTYPE_KSB << REQ4_MEMTYPE_SHIFT); + } + + if (op->dst.type == CCP_MEMTYPE_SYSTEM) { + cr[4] = ccp_addr_lo(&op->dst.u.dma); + cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT) + | ccp_addr_hi(&op->dst.u.dma); + } else { + cr[4] = op->dst.u.ksb * CCP_KSB_BYTES; + cr[5] = (CCP_MEMTYPE_KSB << REQ6_MEMTYPE_SHIFT); + } + + if (op->eom) + cr[0] |= REQ1_EOM; + + return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); +} + +static int ccp_perform_ecc(struct ccp_op *op) +{ + u32 cr[6]; + + /* Fill out the register contents for REQ1 through REQ6 */ + cr[0] = REQ1_ECC_AFFINE_CONVERT + | (CCP_ENGINE_ECC << REQ1_ENGINE_SHIFT) + | (op->u.ecc.function << REQ1_ECC_FUNCTION_SHIFT) + | REQ1_EOM; 
+ cr[1] = op->src.u.dma.length - 1; + cr[2] = ccp_addr_lo(&op->src.u.dma); + cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) + | ccp_addr_hi(&op->src.u.dma); + cr[4] = ccp_addr_lo(&op->dst.u.dma); + cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT) + | ccp_addr_hi(&op->dst.u.dma); + + return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); +} + +static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait) +{ + struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng); + u32 trng_value; + int len = min_t(int, sizeof(trng_value), max); + + /* + * Locking is provided by the caller so we can update device + * hwrng-related fields safely + */ + trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG); + if (!trng_value) { + /* Zero is returned if no data is available or if a + * bad-entropy error is present. Assume an error if + * we exceed TRNG_RETRIES reads of zero. + */ + if (ccp->hwrng_retries++ > TRNG_RETRIES) + return -EIO; + + return 0; + } + + /* Reset the counter and save the rng value */ + ccp->hwrng_retries = 0; + memcpy(data, &trng_value, len); + + return len; +} + +static int ccp_init(struct ccp_device *ccp) +{ + struct device *dev = ccp->dev; + struct ccp_cmd_queue *cmd_q; + struct dma_pool *dma_pool; + char dma_pool_name[MAX_DMAPOOL_NAME_LEN]; + unsigned int qmr, qim, i; + int ret; + + /* Find available queues */ + qim = 0; + qmr = ioread32(ccp->io_regs + Q_MASK_REG); + for (i = 0; i < MAX_HW_QUEUES; i++) { + if (!(qmr & (1 << i))) + continue; + + /* Allocate a dma pool for this queue */ + snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d", + ccp->name, i); + dma_pool = dma_pool_create(dma_pool_name, dev, + CCP_DMAPOOL_MAX_SIZE, + CCP_DMAPOOL_ALIGN, 0); + if (!dma_pool) { + dev_err(dev, "unable to allocate dma pool\n"); + ret = -ENOMEM; + goto e_pool; + } + + cmd_q = &ccp->cmd_q[ccp->cmd_q_count]; + ccp->cmd_q_count++; + + cmd_q->ccp = ccp; + cmd_q->id = i; + cmd_q->dma_pool = dma_pool; + + /* Reserve 2 KSB regions for the queue */ + cmd_q->ksb_key = KSB_START + ccp->ksb_start++; + cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++; + ccp->ksb_count -= 2; + + /* Preset some register values and masks that are queue + * number dependent + */ + cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE + + (CMD_Q_STATUS_INCR * i); + cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE + + (CMD_Q_STATUS_INCR * i); + cmd_q->int_ok = 1 << (i * 2); + cmd_q->int_err = 1 << ((i * 2) + 1); + + cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status)); + + init_waitqueue_head(&cmd_q->int_queue); + + /* Build queue interrupt mask (two interrupts per queue) */ + qim |= cmd_q->int_ok | cmd_q->int_err; + +#ifdef CONFIG_ARM64 + /* For arm64 set the recommended queue cache settings */ + iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE + + (CMD_Q_CACHE_INC * i)); +#endif + + dev_dbg(dev, "queue #%u available\n", i); + } + if (ccp->cmd_q_count == 0) { + dev_notice(dev, "no command queues available\n"); + ret = -EIO; + goto e_pool; + } + dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count); + + /* Disable and clear interrupts until ready */ + iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG); + for (i = 0; i < ccp->cmd_q_count; i++) { + cmd_q = &ccp->cmd_q[i]; + + ioread32(cmd_q->reg_int_status); + ioread32(cmd_q->reg_status); + } + iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG); + + /* Request an irq */ + ret = ccp->get_irq(ccp); + if (ret) { + dev_err(dev, "unable to allocate an IRQ\n"); + goto e_pool; + } + + /* Initialize the queues used to wait for KSB space
and suspend */ + init_waitqueue_head(&ccp->ksb_queue); + init_waitqueue_head(&ccp->suspend_queue); + + /* Create a kthread for each queue */ + for (i = 0; i < ccp->cmd_q_count; i++) { + struct task_struct *kthread; + + cmd_q = &ccp->cmd_q[i]; + + kthread = kthread_create(ccp_cmd_queue_thread, cmd_q, + "%s-q%u", ccp->name, cmd_q->id); + if (IS_ERR(kthread)) { + dev_err(dev, "error creating queue thread (%ld)\n", + PTR_ERR(kthread)); + ret = PTR_ERR(kthread); + goto e_kthread; + } + + cmd_q->kthread = kthread; + wake_up_process(kthread); + } + + /* Register the RNG */ + ccp->hwrng.name = ccp->rngname; + ccp->hwrng.read = ccp_trng_read; + ret = hwrng_register(&ccp->hwrng); + if (ret) { + dev_err(dev, "error registering hwrng (%d)\n", ret); + goto e_kthread; + } + + ccp_add_device(ccp); + + /* Enable interrupts */ + iowrite32(qim, ccp->io_regs + IRQ_MASK_REG); + + return 0; + +e_kthread: + for (i = 0; i < ccp->cmd_q_count; i++) + if (ccp->cmd_q[i].kthread) + kthread_stop(ccp->cmd_q[i].kthread); + + ccp->free_irq(ccp); + +e_pool: + for (i = 0; i < ccp->cmd_q_count; i++) + dma_pool_destroy(ccp->cmd_q[i].dma_pool); + + return ret; +} + +static void ccp_destroy(struct ccp_device *ccp) +{ + struct ccp_cmd_queue *cmd_q; + struct ccp_cmd *cmd; + unsigned int qim, i; + + /* Remove this device from the list of available units first */ + ccp_del_device(ccp); + + /* Unregister the RNG */ + hwrng_unregister(&ccp->hwrng); + + /* Stop the queue kthreads */ + for (i = 0; i < ccp->cmd_q_count; i++) + if (ccp->cmd_q[i].kthread) + kthread_stop(ccp->cmd_q[i].kthread); + + /* Build queue interrupt mask (two interrupt masks per queue) */ + qim = 0; + for (i = 0; i < ccp->cmd_q_count; i++) { + cmd_q = &ccp->cmd_q[i]; + qim |= cmd_q->int_ok | cmd_q->int_err; + } + + /* Disable and clear interrupts */ + iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG); + for (i = 0; i < ccp->cmd_q_count; i++) { + cmd_q = &ccp->cmd_q[i]; + + ioread32(cmd_q->reg_int_status); + ioread32(cmd_q->reg_status); + } + iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG); + + ccp->free_irq(ccp); + + for (i = 0; i < ccp->cmd_q_count; i++) + dma_pool_destroy(ccp->cmd_q[i].dma_pool); + + /* Flush the cmd and backlog queue */ + while (!list_empty(&ccp->cmd)) { + /* Invoke the callback directly with an error code */ + cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry); + list_del(&cmd->entry); + cmd->callback(cmd->data, -ENODEV); + } + while (!list_empty(&ccp->backlog)) { + /* Invoke the callback directly with an error code */ + cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry); + list_del(&cmd->entry); + cmd->callback(cmd->data, -ENODEV); + } +} + +static irqreturn_t ccp_irq_handler(int irq, void *data) +{ + struct device *dev = data; + struct ccp_device *ccp = dev_get_drvdata(dev); + struct ccp_cmd_queue *cmd_q; + u32 q_int, status; + unsigned int i; + + status = ioread32(ccp->io_regs + IRQ_STATUS_REG); + + for (i = 0; i < ccp->cmd_q_count; i++) { + cmd_q = &ccp->cmd_q[i]; + + q_int = status & (cmd_q->int_ok | cmd_q->int_err); + if (q_int) { + cmd_q->int_status = status; + cmd_q->q_status = ioread32(cmd_q->reg_status); + cmd_q->q_int_status = ioread32(cmd_q->reg_int_status); + + /* On error, only save the first error value */ + if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error) + cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status); + + cmd_q->int_rcvd = 1; + + /* Acknowledge the interrupt and wake the kthread */ + iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG); + wake_up_interruptible(&cmd_q->int_queue); + } + } + + return IRQ_HANDLED; +} + 
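Everything defined above in ccp-dev-v3.c is reached only through the ccp3_actions function-pointer table that follows; once a device is probed, common code calls cmd_q->ccp->vdata->perform-><operation>() instead of a version-specific function. A compact userspace model of that dispatch pattern, with all names and the 0x30 version value chosen purely for illustration:

#include <stdio.h>

struct op { const char *desc; };

/* Mirrors struct ccp_actions: one table of entry points per hardware rev. */
struct actions {
	int (*perform_aes)(struct op *op);
	int (*init)(void);
};

/* Mirrors struct ccp_vdata: a version number plus its action table. */
struct vdata {
	unsigned int version;
	const struct actions *perform;
};

static int v3_perform_aes(struct op *op)
{
	printf("v3 engine handles %s\n", op->desc);
	return 0;
}

static int v3_init(void)
{
	printf("v3 init\n");
	return 0;
}

static const struct actions v3_actions = {
	.perform_aes = v3_perform_aes,
	.init = v3_init,
};

static const struct vdata vdata_v3 = { .version = 0x30, .perform = &v3_actions };

int main(void)
{
	/* At probe time the driver binds the matching vdata to the device. */
	const struct vdata *vdata = &vdata_v3;
	struct op op = { "cbc(aes) request" };

	vdata->perform->init();
	return vdata->perform->perform_aes(&op);
}

A future hardware revision can then ship its own actions table and be selected at probe time without touching the common command path.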
+static struct ccp_actions ccp3_actions = { + .perform_aes = ccp_perform_aes, + .perform_xts_aes = ccp_perform_xts_aes, + .perform_sha = ccp_perform_sha, + .perform_rsa = ccp_perform_rsa, + .perform_passthru = ccp_perform_passthru, + .perform_ecc = ccp_perform_ecc, + .init = ccp_init, + .destroy = ccp_destroy, + .irqhandler = ccp_irq_handler, +}; + +struct ccp_vdata ccpv3 = { + .version = CCP_VERSION(3, 0), + .perform = &ccp3_actions, +}; diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index 861bacc1bb94..336e5b780fcb 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) driver * - * Copyright (C) 2013 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <thomas.lendacky@amd.com> * @@ -16,6 +16,8 @@ #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/spinlock.h> +#include <linux/rwlock_types.h> +#include <linux/types.h> #include <linux/mutex.h> #include <linux/delay.h> #include <linux/hw_random.h> @@ -37,20 +39,107 @@ struct ccp_tasklet_data { struct ccp_cmd *cmd; }; -static struct ccp_device *ccp_dev; -static inline struct ccp_device *ccp_get_device(void) +/* List of CCPs, CCP count, read-write access lock, and access functions + * + * Lock structure: get ccp_unit_lock for reading whenever we need to + * examine the CCP list. While holding it for reading we can acquire + * the RR lock to update the round-robin next-CCP pointer. The unit lock + * must be acquired before the RR lock. + * + * If the unit-lock is acquired for writing, we have total control over + * the list, so there's no value in getting the RR lock. + */ +static DEFINE_RWLOCK(ccp_unit_lock); +static LIST_HEAD(ccp_units); + +/* Round-robin counter */ +static DEFINE_RWLOCK(ccp_rr_lock); +static struct ccp_device *ccp_rr; + +/* Ever-increasing value to produce unique unit numbers */ +static atomic_t ccp_unit_ordinal; +unsigned int ccp_increment_unit_ordinal(void) { - return ccp_dev; + return atomic_inc_return(&ccp_unit_ordinal); } -static inline void ccp_add_device(struct ccp_device *ccp) +/** + * ccp_add_device - add a CCP device to the list + * + * @ccp: ccp_device struct pointer + * + * Put this CCP on the unit list, which makes it available + * for use. + */ +void ccp_add_device(struct ccp_device *ccp) { - ccp_dev = ccp; + unsigned long flags; + + write_lock_irqsave(&ccp_unit_lock, flags); + list_add_tail(&ccp->entry, &ccp_units); + if (!ccp_rr) + /* We already have the list lock (we're first) so this + * pointer can't change on us. Set its initial value. + */ + ccp_rr = ccp; + write_unlock_irqrestore(&ccp_unit_lock, flags); } -static inline void ccp_del_device(struct ccp_device *ccp) +/** + * ccp_del_device - remove a CCP device from the list + * + * @ccp: ccp_device struct pointer + * + * Remove this unit from the list of devices. If the next device + * up for use is this one, adjust the pointer. If this is the last + * device, NULL the pointer. + */ +void ccp_del_device(struct ccp_device *ccp) { - ccp_dev = NULL; + unsigned long flags; + + write_lock_irqsave(&ccp_unit_lock, flags); + if (ccp_rr == ccp) { + /* ccp_unit_lock is read/write; any read access + * will be suspended while we make changes to the + * list and RR pointer.
+ */ + if (list_is_last(&ccp_rr->entry, &ccp_units)) + ccp_rr = list_first_entry(&ccp_units, struct ccp_device, + entry); + else + ccp_rr = list_next_entry(ccp_rr, entry); + } + list_del(&ccp->entry); + if (list_empty(&ccp_units)) + ccp_rr = NULL; + write_unlock_irqrestore(&ccp_unit_lock, flags); +} + +static struct ccp_device *ccp_get_device(void) +{ + unsigned long flags; + struct ccp_device *dp = NULL; + + /* We round-robin through the unit list. + * The (ccp_rr) pointer refers to the next unit to use. + */ + read_lock_irqsave(&ccp_unit_lock, flags); + if (!list_empty(&ccp_units)) { + write_lock_irqsave(&ccp_rr_lock, flags); + dp = ccp_rr; + if (list_is_last(&ccp_rr->entry, &ccp_units)) + ccp_rr = list_first_entry(&ccp_units, struct ccp_device, + entry); + else + ccp_rr = list_next_entry(ccp_rr, entry); + write_unlock_irqrestore(&ccp_rr_lock, flags); + } + read_unlock_irqrestore(&ccp_unit_lock, flags); + + return dp; } /** @@ -60,14 +149,41 @@ static inline void ccp_del_device(struct ccp_device *ccp) */ int ccp_present(void) { - if (ccp_get_device()) - return 0; + unsigned long flags; + int ret; - return -ENODEV; + read_lock_irqsave(&ccp_unit_lock, flags); + ret = list_empty(&ccp_units); + read_unlock_irqrestore(&ccp_unit_lock, flags); + + return ret ? -ENODEV : 0; } EXPORT_SYMBOL_GPL(ccp_present); /** + * ccp_version - get the version of the CCP device + * + * Returns the version from the first unit on the list; + * otherwise a zero if no CCP device is present + */ +unsigned int ccp_version(void) +{ + struct ccp_device *dp; + unsigned long flags; + int ret = 0; + + read_lock_irqsave(&ccp_unit_lock, flags); + if (!list_empty(&ccp_units)) { + dp = list_first_entry(&ccp_units, struct ccp_device, entry); + ret = dp->vdata->version; + } + read_unlock_irqrestore(&ccp_unit_lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(ccp_version); + +/** * ccp_enqueue_cmd - queue an operation for processing by the CCP * * @cmd: ccp_cmd struct to be processed @@ -221,7 +337,12 @@ static void ccp_do_cmd_complete(unsigned long data) complete(&tdata->completion); } -static int ccp_cmd_queue_thread(void *data) +/** + * ccp_cmd_queue_thread - create a kernel thread to manage a CCP queue + * + * @data: thread-specific data + */ +int ccp_cmd_queue_thread(void *data) { struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data; struct ccp_cmd *cmd; @@ -257,35 +378,6 @@ static int ccp_cmd_queue_thread(void *data) return 0; } -static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait) -{ - struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng); - u32 trng_value; - int len = min_t(int, sizeof(trng_value), max); - - /* - * Locking is provided by the caller so we can update device - * hwrng-related fields safely - */ - trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG); - if (!trng_value) { - /* Zero is returned if not data is available or if a - * bad-entropy error is present. Assume an error if - * we exceed TRNG_RETRIES reads of zero. 
- */ - if (ccp->hwrng_retries++ > TRNG_RETRIES) - return -EIO; - - return 0; - } - - /* Reset the counter and save the rng value */ - ccp->hwrng_retries = 0; - memcpy(data, &trng_value, len); - - return len; -} - /** * ccp_alloc_struct - allocate and initialize the ccp_device struct * @@ -309,253 +401,11 @@ struct ccp_device *ccp_alloc_struct(struct device *dev) ccp->ksb_count = KSB_COUNT; ccp->ksb_start = 0; - return ccp; -} - -/** - * ccp_init - initialize the CCP device - * - * @ccp: ccp_device struct - */ -int ccp_init(struct ccp_device *ccp) -{ - struct device *dev = ccp->dev; - struct ccp_cmd_queue *cmd_q; - struct dma_pool *dma_pool; - char dma_pool_name[MAX_DMAPOOL_NAME_LEN]; - unsigned int qmr, qim, i; - int ret; - - /* Find available queues */ - qim = 0; - qmr = ioread32(ccp->io_regs + Q_MASK_REG); - for (i = 0; i < MAX_HW_QUEUES; i++) { - if (!(qmr & (1 << i))) - continue; - - /* Allocate a dma pool for this queue */ - snprintf(dma_pool_name, sizeof(dma_pool_name), "ccp_q%d", i); - dma_pool = dma_pool_create(dma_pool_name, dev, - CCP_DMAPOOL_MAX_SIZE, - CCP_DMAPOOL_ALIGN, 0); - if (!dma_pool) { - dev_err(dev, "unable to allocate dma pool\n"); - ret = -ENOMEM; - goto e_pool; - } - - cmd_q = &ccp->cmd_q[ccp->cmd_q_count]; - ccp->cmd_q_count++; - - cmd_q->ccp = ccp; - cmd_q->id = i; - cmd_q->dma_pool = dma_pool; - - /* Reserve 2 KSB regions for the queue */ - cmd_q->ksb_key = KSB_START + ccp->ksb_start++; - cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++; - ccp->ksb_count -= 2; - - /* Preset some register values and masks that are queue - * number dependent - */ - cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE + - (CMD_Q_STATUS_INCR * i); - cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE + - (CMD_Q_STATUS_INCR * i); - cmd_q->int_ok = 1 << (i * 2); - cmd_q->int_err = 1 << ((i * 2) + 1); - - cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status)); - - init_waitqueue_head(&cmd_q->int_queue); - - /* Build queue interrupt mask (two interrupts per queue) */ - qim |= cmd_q->int_ok | cmd_q->int_err; + ccp->ord = ccp_increment_unit_ordinal(); + snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord); + snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord); -#ifdef CONFIG_ARM64 - /* For arm64 set the recommended queue cache settings */ - iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE + - (CMD_Q_CACHE_INC * i)); -#endif - - dev_dbg(dev, "queue #%u available\n", i); - } - if (ccp->cmd_q_count == 0) { - dev_notice(dev, "no command queues available\n"); - ret = -EIO; - goto e_pool; - } - dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count); - - /* Disable and clear interrupts until ready */ - iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG); - for (i = 0; i < ccp->cmd_q_count; i++) { - cmd_q = &ccp->cmd_q[i]; - - ioread32(cmd_q->reg_int_status); - ioread32(cmd_q->reg_status); - } - iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG); - - /* Request an irq */ - ret = ccp->get_irq(ccp); - if (ret) { - dev_err(dev, "unable to allocate an IRQ\n"); - goto e_pool; - } - - /* Initialize the queues used to wait for KSB space and suspend */ - init_waitqueue_head(&ccp->ksb_queue); - init_waitqueue_head(&ccp->suspend_queue); - - /* Create a kthread for each queue */ - for (i = 0; i < ccp->cmd_q_count; i++) { - struct task_struct *kthread; - - cmd_q = &ccp->cmd_q[i]; - - kthread = kthread_create(ccp_cmd_queue_thread, cmd_q, - "ccp-q%u", cmd_q->id); - if (IS_ERR(kthread)) { - dev_err(dev, "error creating queue thread (%ld)\n", - PTR_ERR(kthread)); - ret = 
PTR_ERR(kthread); - goto e_kthread; - } - - cmd_q->kthread = kthread; - wake_up_process(kthread); - } - - /* Register the RNG */ - ccp->hwrng.name = "ccp-rng"; - ccp->hwrng.read = ccp_trng_read; - ret = hwrng_register(&ccp->hwrng); - if (ret) { - dev_err(dev, "error registering hwrng (%d)\n", ret); - goto e_kthread; - } - - /* Make the device struct available before enabling interrupts */ - ccp_add_device(ccp); - - /* Enable interrupts */ - iowrite32(qim, ccp->io_regs + IRQ_MASK_REG); - - return 0; - -e_kthread: - for (i = 0; i < ccp->cmd_q_count; i++) - if (ccp->cmd_q[i].kthread) - kthread_stop(ccp->cmd_q[i].kthread); - - ccp->free_irq(ccp); - -e_pool: - for (i = 0; i < ccp->cmd_q_count; i++) - dma_pool_destroy(ccp->cmd_q[i].dma_pool); - - return ret; -} - -/** - * ccp_destroy - tear down the CCP device - * - * @ccp: ccp_device struct - */ -void ccp_destroy(struct ccp_device *ccp) -{ - struct ccp_cmd_queue *cmd_q; - struct ccp_cmd *cmd; - unsigned int qim, i; - - /* Remove general access to the device struct */ - ccp_del_device(ccp); - - /* Unregister the RNG */ - hwrng_unregister(&ccp->hwrng); - - /* Stop the queue kthreads */ - for (i = 0; i < ccp->cmd_q_count; i++) - if (ccp->cmd_q[i].kthread) - kthread_stop(ccp->cmd_q[i].kthread); - - /* Build queue interrupt mask (two interrupt masks per queue) */ - qim = 0; - for (i = 0; i < ccp->cmd_q_count; i++) { - cmd_q = &ccp->cmd_q[i]; - qim |= cmd_q->int_ok | cmd_q->int_err; - } - - /* Disable and clear interrupts */ - iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG); - for (i = 0; i < ccp->cmd_q_count; i++) { - cmd_q = &ccp->cmd_q[i]; - - ioread32(cmd_q->reg_int_status); - ioread32(cmd_q->reg_status); - } - iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG); - - ccp->free_irq(ccp); - - for (i = 0; i < ccp->cmd_q_count; i++) - dma_pool_destroy(ccp->cmd_q[i].dma_pool); - - /* Flush the cmd and backlog queue */ - while (!list_empty(&ccp->cmd)) { - /* Invoke the callback directly with an error code */ - cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry); - list_del(&cmd->entry); - cmd->callback(cmd->data, -ENODEV); - } - while (!list_empty(&ccp->backlog)) { - /* Invoke the callback directly with an error code */ - cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry); - list_del(&cmd->entry); - cmd->callback(cmd->data, -ENODEV); - } -} - -/** - * ccp_irq_handler - handle interrupts generated by the CCP device - * - * @irq: the irq associated with the interrupt - * @data: the data value supplied when the irq was created - */ -irqreturn_t ccp_irq_handler(int irq, void *data) -{ - struct device *dev = data; - struct ccp_device *ccp = dev_get_drvdata(dev); - struct ccp_cmd_queue *cmd_q; - u32 q_int, status; - unsigned int i; - - status = ioread32(ccp->io_regs + IRQ_STATUS_REG); - - for (i = 0; i < ccp->cmd_q_count; i++) { - cmd_q = &ccp->cmd_q[i]; - - q_int = status & (cmd_q->int_ok | cmd_q->int_err); - if (q_int) { - cmd_q->int_status = status; - cmd_q->q_status = ioread32(cmd_q->reg_status); - cmd_q->q_int_status = ioread32(cmd_q->reg_int_status); - - /* On error, only save the first error value */ - if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error) - cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status); - - cmd_q->int_rcvd = 1; - - /* Acknowledge the interrupt and wake the kthread */ - iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG); - wake_up_interruptible(&cmd_q->int_queue); - } - } - - return IRQ_HANDLED; + return ccp; } #ifdef CONFIG_PM @@ -577,41 +427,22 @@ bool ccp_queues_suspended(struct ccp_device *ccp) } #endif -#ifdef CONFIG_X86 -static 
const struct x86_cpu_id ccp_support[] = { - { X86_VENDOR_AMD, 22, }, - { }, -}; -#endif - static int __init ccp_mod_init(void) { #ifdef CONFIG_X86 - struct cpuinfo_x86 *cpuinfo = &boot_cpu_data; int ret; - if (!x86_match_cpu(ccp_support)) - return -ENODEV; - - switch (cpuinfo->x86) { - case 22: - if ((cpuinfo->x86_model < 48) || (cpuinfo->x86_model > 63)) - return -ENODEV; - - ret = ccp_pci_init(); - if (ret) - return ret; - - /* Don't leave the driver loaded if init failed */ - if (!ccp_get_device()) { - ccp_pci_exit(); - return -ENODEV; - } - - return 0; + ret = ccp_pci_init(); + if (ret) + return ret; - break; + /* Don't leave the driver loaded if init failed */ + if (ccp_present() != 0) { + ccp_pci_exit(); + return -ENODEV; } + + return 0; #endif #ifdef CONFIG_ARM64 @@ -622,7 +453,7 @@ static int __init ccp_mod_init(void) return ret; /* Don't leave the driver loaded if init failed */ - if (!ccp_get_device()) { + if (ccp_present() != 0) { ccp_platform_exit(); return -ENODEV; } @@ -636,13 +467,7 @@ static int __init ccp_mod_init(void) static void __exit ccp_mod_exit(void) { #ifdef CONFIG_X86 - struct cpuinfo_x86 *cpuinfo = &boot_cpu_data; - - switch (cpuinfo->x86) { - case 22: - ccp_pci_exit(); - break; - } + ccp_pci_exit(); #endif #ifdef CONFIG_ARM64 diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 6ff89031fb96..7745d0be491d 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) driver * - * Copyright (C) 2013 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <thomas.lendacky@amd.com> * @@ -23,6 +23,7 @@ #include <linux/hw_random.h> #include <linux/bitops.h> +#define MAX_CCP_NAME_LEN 16 #define MAX_DMAPOOL_NAME_LEN 32 #define MAX_HW_QUEUES 5 @@ -140,6 +141,29 @@ #define CCP_ECC_RESULT_OFFSET 60 #define CCP_ECC_RESULT_SUCCESS 0x0001 +struct ccp_op; + +/* Structure for computation functions that are device-specific */ +struct ccp_actions { + int (*perform_aes)(struct ccp_op *); + int (*perform_xts_aes)(struct ccp_op *); + int (*perform_sha)(struct ccp_op *); + int (*perform_rsa)(struct ccp_op *); + int (*perform_passthru)(struct ccp_op *); + int (*perform_ecc)(struct ccp_op *); + int (*init)(struct ccp_device *); + void (*destroy)(struct ccp_device *); + irqreturn_t (*irqhandler)(int, void *); +}; + +/* Structure to hold CCP version-specific values */ +struct ccp_vdata { + unsigned int version; + struct ccp_actions *perform; +}; + +extern struct ccp_vdata ccpv3; + struct ccp_device; struct ccp_cmd; @@ -184,6 +208,13 @@ struct ccp_cmd_queue { } ____cacheline_aligned; struct ccp_device { + struct list_head entry; + + struct ccp_vdata *vdata; + unsigned int ord; + char name[MAX_CCP_NAME_LEN]; + char rngname[MAX_CCP_NAME_LEN]; + struct device *dev; /* @@ -258,18 +289,132 @@ struct ccp_device { unsigned int axcache; }; +enum ccp_memtype { + CCP_MEMTYPE_SYSTEM = 0, + CCP_MEMTYPE_KSB, + CCP_MEMTYPE_LOCAL, + CCP_MEMTYPE__LAST, +}; + +struct ccp_dma_info { + dma_addr_t address; + unsigned int offset; + unsigned int length; + enum dma_data_direction dir; +}; + +struct ccp_dm_workarea { + struct device *dev; + struct dma_pool *dma_pool; + unsigned int length; + + u8 *address; + struct ccp_dma_info dma; +}; + +struct ccp_sg_workarea { + struct scatterlist *sg; + int nents; + + struct scatterlist *dma_sg; + struct device *dma_dev; + unsigned int dma_count; + enum dma_data_direction dma_dir; + + unsigned int sg_used; + + u64 
bytes_left; +}; + +struct ccp_data { + struct ccp_sg_workarea sg_wa; + struct ccp_dm_workarea dm_wa; +}; + +struct ccp_mem { + enum ccp_memtype type; + union { + struct ccp_dma_info dma; + u32 ksb; + } u; +}; + +struct ccp_aes_op { + enum ccp_aes_type type; + enum ccp_aes_mode mode; + enum ccp_aes_action action; +}; + +struct ccp_xts_aes_op { + enum ccp_aes_action action; + enum ccp_xts_aes_unit_size unit_size; +}; + +struct ccp_sha_op { + enum ccp_sha_type type; + u64 msg_bits; +}; + +struct ccp_rsa_op { + u32 mod_size; + u32 input_len; +}; + +struct ccp_passthru_op { + enum ccp_passthru_bitwise bit_mod; + enum ccp_passthru_byteswap byte_swap; +}; + +struct ccp_ecc_op { + enum ccp_ecc_function function; +}; + +struct ccp_op { + struct ccp_cmd_queue *cmd_q; + + u32 jobid; + u32 ioc; + u32 soc; + u32 ksb_key; + u32 ksb_ctx; + u32 init; + u32 eom; + + struct ccp_mem src; + struct ccp_mem dst; + + union { + struct ccp_aes_op aes; + struct ccp_xts_aes_op xts; + struct ccp_sha_op sha; + struct ccp_rsa_op rsa; + struct ccp_passthru_op passthru; + struct ccp_ecc_op ecc; + } u; +}; + +static inline u32 ccp_addr_lo(struct ccp_dma_info *info) +{ + return lower_32_bits(info->address + info->offset); +} + +static inline u32 ccp_addr_hi(struct ccp_dma_info *info) +{ + return upper_32_bits(info->address + info->offset) & 0x0000ffff; +} + int ccp_pci_init(void); void ccp_pci_exit(void); int ccp_platform_init(void); void ccp_platform_exit(void); +void ccp_add_device(struct ccp_device *ccp); +void ccp_del_device(struct ccp_device *ccp); + struct ccp_device *ccp_alloc_struct(struct device *dev); -int ccp_init(struct ccp_device *ccp); -void ccp_destroy(struct ccp_device *ccp); bool ccp_queues_suspended(struct ccp_device *ccp); - -irqreturn_t ccp_irq_handler(int irq, void *data); +int ccp_cmd_queue_thread(void *data); int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd); diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 6613aee79b87..eefdf595f758 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) driver * - * Copyright (C) 2013 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. 
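The new ccp_addr_lo()/ccp_addr_hi() inlines in ccp-dev.h above split a DMA address into a full low word and a 16-bit high word, which implies the request registers carry at most 48 address bits; that width is inferred from the 0x0000ffff mask rather than stated anywhere in the driver. A standalone sketch of the split:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel's lower_32_bits()/upper_32_bits(). */
static uint32_t addr_lo(uint64_t addr)
{
	return (uint32_t)addr;
}

static uint32_t addr_hi(uint64_t addr)
{
	return (uint32_t)(addr >> 32) & 0x0000ffff;
}

int main(void)
{
	uint64_t dma = 0x0000123456789abcULL;   /* example 48-bit DMA address */

	printf("lo = 0x%08" PRIx32 "\n", addr_lo(dma));  /* 0x56789abc */
	printf("hi = 0x%08" PRIx32 "\n", addr_hi(dma));  /* 0x00001234 */
	return 0;
}

In the ccp_perform_* register-fill routines, these two words land in cr[2]/cr[3] for the source and cr[4]/cr[5] for the destination, packed alongside the memory-type and KSB fields.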
* * Author: Tom Lendacky <thomas.lendacky@amd.com> * @@ -13,124 +13,12 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/pci.h> -#include <linux/pci_ids.h> -#include <linux/kthread.h> -#include <linux/sched.h> #include <linux/interrupt.h> -#include <linux/spinlock.h> -#include <linux/mutex.h> -#include <linux/delay.h> -#include <linux/ccp.h> -#include <linux/scatterlist.h> #include <crypto/scatterwalk.h> -#include <crypto/sha.h> +#include <linux/ccp.h> #include "ccp-dev.h" -enum ccp_memtype { - CCP_MEMTYPE_SYSTEM = 0, - CCP_MEMTYPE_KSB, - CCP_MEMTYPE_LOCAL, - CCP_MEMTYPE__LAST, -}; - -struct ccp_dma_info { - dma_addr_t address; - unsigned int offset; - unsigned int length; - enum dma_data_direction dir; -}; - -struct ccp_dm_workarea { - struct device *dev; - struct dma_pool *dma_pool; - unsigned int length; - - u8 *address; - struct ccp_dma_info dma; -}; - -struct ccp_sg_workarea { - struct scatterlist *sg; - int nents; - - struct scatterlist *dma_sg; - struct device *dma_dev; - unsigned int dma_count; - enum dma_data_direction dma_dir; - - unsigned int sg_used; - - u64 bytes_left; -}; - -struct ccp_data { - struct ccp_sg_workarea sg_wa; - struct ccp_dm_workarea dm_wa; -}; - -struct ccp_mem { - enum ccp_memtype type; - union { - struct ccp_dma_info dma; - u32 ksb; - } u; -}; - -struct ccp_aes_op { - enum ccp_aes_type type; - enum ccp_aes_mode mode; - enum ccp_aes_action action; -}; - -struct ccp_xts_aes_op { - enum ccp_aes_action action; - enum ccp_xts_aes_unit_size unit_size; -}; - -struct ccp_sha_op { - enum ccp_sha_type type; - u64 msg_bits; -}; - -struct ccp_rsa_op { - u32 mod_size; - u32 input_len; -}; - -struct ccp_passthru_op { - enum ccp_passthru_bitwise bit_mod; - enum ccp_passthru_byteswap byte_swap; -}; - -struct ccp_ecc_op { - enum ccp_ecc_function function; -}; - -struct ccp_op { - struct ccp_cmd_queue *cmd_q; - - u32 jobid; - u32 ioc; - u32 soc; - u32 ksb_key; - u32 ksb_ctx; - u32 init; - u32 eom; - - struct ccp_mem src; - struct ccp_mem dst; - - union { - struct ccp_aes_op aes; - struct ccp_xts_aes_op xts; - struct ccp_sha_op sha; - struct ccp_rsa_op rsa; - struct ccp_passthru_op passthru; - struct ccp_ecc_op ecc; - } u; -}; - /* SHA initial context values */ static const __be32 ccp_sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1), @@ -152,253 +40,6 @@ static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), }; -static u32 ccp_addr_lo(struct ccp_dma_info *info) -{ - return lower_32_bits(info->address + info->offset); -} - -static u32 ccp_addr_hi(struct ccp_dma_info *info) -{ - return upper_32_bits(info->address + info->offset) & 0x0000ffff; -} - -static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count) -{ - struct ccp_cmd_queue *cmd_q = op->cmd_q; - struct ccp_device *ccp = cmd_q->ccp; - void __iomem *cr_addr; - u32 cr0, cmd; - unsigned int i; - int ret = 0; - - /* We could read a status register to see how many free slots - * are actually available, but reading that register resets it - * and you could lose some error information. 
- */ - cmd_q->free_slots--; - - cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT) - | (op->jobid << REQ0_JOBID_SHIFT) - | REQ0_WAIT_FOR_WRITE; - - if (op->soc) - cr0 |= REQ0_STOP_ON_COMPLETE - | REQ0_INT_ON_COMPLETE; - - if (op->ioc || !cmd_q->free_slots) - cr0 |= REQ0_INT_ON_COMPLETE; - - /* Start at CMD_REQ1 */ - cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR; - - mutex_lock(&ccp->req_mutex); - - /* Write CMD_REQ1 through CMD_REQx first */ - for (i = 0; i < cr_count; i++, cr_addr += CMD_REQ_INCR) - iowrite32(*(cr + i), cr_addr); - - /* Tell the CCP to start */ - wmb(); - iowrite32(cr0, ccp->io_regs + CMD_REQ0); - - mutex_unlock(&ccp->req_mutex); - - if (cr0 & REQ0_INT_ON_COMPLETE) { - /* Wait for the job to complete */ - ret = wait_event_interruptible(cmd_q->int_queue, - cmd_q->int_rcvd); - if (ret || cmd_q->cmd_error) { - /* On error delete all related jobs from the queue */ - cmd = (cmd_q->id << DEL_Q_ID_SHIFT) - | op->jobid; - - iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB); - - if (!ret) - ret = -EIO; - } else if (op->soc) { - /* Delete just head job from the queue on SoC */ - cmd = DEL_Q_ACTIVE - | (cmd_q->id << DEL_Q_ID_SHIFT) - | op->jobid; - - iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB); - } - - cmd_q->free_slots = CMD_Q_DEPTH(cmd_q->q_status); - - cmd_q->int_rcvd = 0; - } - - return ret; -} - -static int ccp_perform_aes(struct ccp_op *op) -{ - u32 cr[6]; - - /* Fill out the register contents for REQ1 through REQ6 */ - cr[0] = (CCP_ENGINE_AES << REQ1_ENGINE_SHIFT) - | (op->u.aes.type << REQ1_AES_TYPE_SHIFT) - | (op->u.aes.mode << REQ1_AES_MODE_SHIFT) - | (op->u.aes.action << REQ1_AES_ACTION_SHIFT) - | (op->ksb_key << REQ1_KEY_KSB_SHIFT); - cr[1] = op->src.u.dma.length - 1; - cr[2] = ccp_addr_lo(&op->src.u.dma); - cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT) - | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) - | ccp_addr_hi(&op->src.u.dma); - cr[4] = ccp_addr_lo(&op->dst.u.dma); - cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT) - | ccp_addr_hi(&op->dst.u.dma); - - if (op->u.aes.mode == CCP_AES_MODE_CFB) - cr[0] |= ((0x7f) << REQ1_AES_CFB_SIZE_SHIFT); - - if (op->eom) - cr[0] |= REQ1_EOM; - - if (op->init) - cr[0] |= REQ1_INIT; - - return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); -} - -static int ccp_perform_xts_aes(struct ccp_op *op) -{ - u32 cr[6]; - - /* Fill out the register contents for REQ1 through REQ6 */ - cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT) - | (op->u.xts.action << REQ1_AES_ACTION_SHIFT) - | (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT) - | (op->ksb_key << REQ1_KEY_KSB_SHIFT); - cr[1] = op->src.u.dma.length - 1; - cr[2] = ccp_addr_lo(&op->src.u.dma); - cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT) - | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) - | ccp_addr_hi(&op->src.u.dma); - cr[4] = ccp_addr_lo(&op->dst.u.dma); - cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT) - | ccp_addr_hi(&op->dst.u.dma); - - if (op->eom) - cr[0] |= REQ1_EOM; - - if (op->init) - cr[0] |= REQ1_INIT; - - return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); -} - -static int ccp_perform_sha(struct ccp_op *op) -{ - u32 cr[6]; - - /* Fill out the register contents for REQ1 through REQ6 */ - cr[0] = (CCP_ENGINE_SHA << REQ1_ENGINE_SHIFT) - | (op->u.sha.type << REQ1_SHA_TYPE_SHIFT) - | REQ1_INIT; - cr[1] = op->src.u.dma.length - 1; - cr[2] = ccp_addr_lo(&op->src.u.dma); - cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT) - | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) - | ccp_addr_hi(&op->src.u.dma); - - if (op->eom) { - cr[0] |= REQ1_EOM; - cr[4] = lower_32_bits(op->u.sha.msg_bits); - cr[5] = upper_32_bits(op->u.sha.msg_bits); - } 
else { - cr[4] = 0; - cr[5] = 0; - } - - return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); -} - -static int ccp_perform_rsa(struct ccp_op *op) -{ - u32 cr[6]; - - /* Fill out the register contents for REQ1 through REQ6 */ - cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT) - | (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT) - | (op->ksb_key << REQ1_KEY_KSB_SHIFT) - | REQ1_EOM; - cr[1] = op->u.rsa.input_len - 1; - cr[2] = ccp_addr_lo(&op->src.u.dma); - cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT) - | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) - | ccp_addr_hi(&op->src.u.dma); - cr[4] = ccp_addr_lo(&op->dst.u.dma); - cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT) - | ccp_addr_hi(&op->dst.u.dma); - - return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); -} - -static int ccp_perform_passthru(struct ccp_op *op) -{ - u32 cr[6]; - - /* Fill out the register contents for REQ1 through REQ6 */ - cr[0] = (CCP_ENGINE_PASSTHRU << REQ1_ENGINE_SHIFT) - | (op->u.passthru.bit_mod << REQ1_PT_BW_SHIFT) - | (op->u.passthru.byte_swap << REQ1_PT_BS_SHIFT); - - if (op->src.type == CCP_MEMTYPE_SYSTEM) - cr[1] = op->src.u.dma.length - 1; - else - cr[1] = op->dst.u.dma.length - 1; - - if (op->src.type == CCP_MEMTYPE_SYSTEM) { - cr[2] = ccp_addr_lo(&op->src.u.dma); - cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) - | ccp_addr_hi(&op->src.u.dma); - - if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP) - cr[3] |= (op->ksb_key << REQ4_KSB_SHIFT); - } else { - cr[2] = op->src.u.ksb * CCP_KSB_BYTES; - cr[3] = (CCP_MEMTYPE_KSB << REQ4_MEMTYPE_SHIFT); - } - - if (op->dst.type == CCP_MEMTYPE_SYSTEM) { - cr[4] = ccp_addr_lo(&op->dst.u.dma); - cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT) - | ccp_addr_hi(&op->dst.u.dma); - } else { - cr[4] = op->dst.u.ksb * CCP_KSB_BYTES; - cr[5] = (CCP_MEMTYPE_KSB << REQ6_MEMTYPE_SHIFT); - } - - if (op->eom) - cr[0] |= REQ1_EOM; - - return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); -} - -static int ccp_perform_ecc(struct ccp_op *op) -{ - u32 cr[6]; - - /* Fill out the register contents for REQ1 through REQ6 */ - cr[0] = REQ1_ECC_AFFINE_CONVERT - | (CCP_ENGINE_ECC << REQ1_ENGINE_SHIFT) - | (op->u.ecc.function << REQ1_ECC_FUNCTION_SHIFT) - | REQ1_EOM; - cr[1] = op->src.u.dma.length - 1; - cr[2] = ccp_addr_lo(&op->src.u.dma); - cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) - | ccp_addr_hi(&op->src.u.dma); - cr[4] = ccp_addr_lo(&op->dst.u.dma); - cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT) - | ccp_addr_hi(&op->dst.u.dma); - - return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); -} - static u32 ccp_alloc_ksb(struct ccp_device *ccp, unsigned int count) { int start; @@ -837,7 +478,7 @@ static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q, op.u.passthru.byte_swap = byte_swap; - return ccp_perform_passthru(&op); + return cmd_q->ccp->vdata->perform->perform_passthru(&op); } static int ccp_copy_to_ksb(struct ccp_cmd_queue *cmd_q, @@ -969,7 +610,7 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, } } - ret = ccp_perform_aes(&op); + ret = cmd_q->ccp->vdata->perform->perform_aes(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_src; @@ -1131,7 +772,7 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) op.soc = 1; } - ret = ccp_perform_aes(&op); + ret = cmd_q->ccp->vdata->perform->perform_aes(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; @@ -1296,7 +937,7 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, if (!src.sg_wa.bytes_left) op.eom = 1; - ret = ccp_perform_xts_aes(&op); + ret = 
cmd_q->ccp->vdata->perform->perform_xts_aes(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; @@ -1453,7 +1094,7 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) if (sha->final && !src.sg_wa.bytes_left) op.eom = 1; - ret = ccp_perform_sha(&op); + ret = cmd_q->ccp->vdata->perform->perform_sha(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_data; @@ -1633,7 +1274,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) op.u.rsa.mod_size = rsa->key_size; op.u.rsa.input_len = i_len; - ret = ccp_perform_rsa(&op); + ret = cmd_q->ccp->vdata->perform->perform_rsa(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; @@ -1758,7 +1399,7 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, op.dst.u.dma.offset = dst.sg_wa.sg_used; op.dst.u.dma.length = op.src.u.dma.length; - ret = ccp_perform_passthru(&op); + ret = cmd_q->ccp->vdata->perform->perform_passthru(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; @@ -1870,7 +1511,7 @@ static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) op.u.ecc.function = cmd->u.ecc.function; - ret = ccp_perform_ecc(&op); + ret = cmd_q->ccp->vdata->perform->perform_ecc(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; @@ -2034,7 +1675,7 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) op.u.ecc.function = cmd->u.ecc.function; - ret = ccp_perform_ecc(&op); + ret = cmd_q->ccp->vdata->perform->perform_ecc(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c index 7690467c42f8..0bf262e36b6b 100644 --- a/drivers/crypto/ccp/ccp-pci.c +++ b/drivers/crypto/ccp/ccp-pci.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) driver * - * Copyright (C) 2013 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. 
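The ccp-pci.c hunk that follows binds the v3 ccp_vdata to the device through the driver_data field of the PCI id table and refuses to probe when that data is missing. A userspace model of the same lookup-and-validate pattern; 0x1022/0x1537 are the real AMD vendor/device ids from the table, everything else is illustrative:

#include <stdio.h>

struct version_data { unsigned int version; };

static const struct version_data ccpv3 = { 0x30 };   /* assumed value */

struct id_entry {
	unsigned short vendor, device;
	const struct version_data *driver_data;
};

static const struct id_entry id_table[] = {
	{ 0x1022, 0x1537, &ccpv3 },
	{ 0, 0, NULL },              /* terminator */
};

static const struct version_data *match(unsigned short ven, unsigned short dev)
{
	const struct id_entry *id;

	for (id = id_table; id->vendor; id++)
		if (id->vendor == ven && id->device == dev)
			return id->driver_data;
	return NULL;
}

int main(void)
{
	const struct version_data *vdata = match(0x1022, 0x1537);

	if (!vdata || !vdata->version) {
		fprintf(stderr, "missing driver data\n");
		return 1;   /* mirrors the new -ENODEV path in ccp_pci_probe() */
	}
	printf("bound device, version 0x%x\n", vdata->version);
	return 0;
}

With the vdata in hand, ccp_pci_probe() calls ccp->vdata->perform->init(ccp) instead of a fixed ccp_init(), completing the version abstraction.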
* * Author: Tom Lendacky <thomas.lendacky@amd.com> * @@ -59,9 +59,11 @@ static int ccp_get_msix_irqs(struct ccp_device *ccp) ccp_pci->msix_count = ret; for (v = 0; v < ccp_pci->msix_count; v++) { /* Set the interrupt names and request the irqs */ - snprintf(ccp_pci->msix[v].name, name_len, "ccp-%u", v); + snprintf(ccp_pci->msix[v].name, name_len, "%s-%u", + ccp->name, v); ccp_pci->msix[v].vector = msix_entry[v].vector; - ret = request_irq(ccp_pci->msix[v].vector, ccp_irq_handler, + ret = request_irq(ccp_pci->msix[v].vector, + ccp->vdata->perform->irqhandler, 0, ccp_pci->msix[v].name, dev); if (ret) { dev_notice(dev, "unable to allocate MSI-X IRQ (%d)\n", @@ -94,7 +96,8 @@ static int ccp_get_msi_irq(struct ccp_device *ccp) return ret; ccp->irq = pdev->irq; - ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev); + ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0, + ccp->name, dev); if (ret) { dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret); goto e_msi; @@ -179,6 +182,12 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto e_err; ccp->dev_specific = ccp_pci; + ccp->vdata = (struct ccp_vdata *)id->driver_data; + if (!ccp->vdata || !ccp->vdata->version) { + ret = -ENODEV; + dev_err(dev, "missing driver data\n"); + goto e_err; + } ccp->get_irq = ccp_get_irqs; ccp->free_irq = ccp_free_irqs; @@ -221,7 +230,7 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev_set_drvdata(dev, ccp); - ret = ccp_init(ccp); + ret = ccp->vdata->perform->init(ccp); if (ret) goto e_iomap; @@ -251,7 +260,7 @@ static void ccp_pci_remove(struct pci_dev *pdev) if (!ccp) return; - ccp_destroy(ccp); + ccp->vdata->perform->destroy(ccp); pci_iounmap(pdev, ccp->io_map); @@ -312,7 +321,7 @@ static int ccp_pci_resume(struct pci_dev *pdev) #endif static const struct pci_device_id ccp_pci_table[] = { - { PCI_VDEVICE(AMD, 0x1537), }, + { PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 }, /* Last entry must be zero */ { 0, } }; diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c index 66dd7c9d08c3..351f28d8c336 100644 --- a/drivers/crypto/ccp/ccp-platform.c +++ b/drivers/crypto/ccp/ccp-platform.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) driver * - * Copyright (C) 2014 Advanced Micro Devices, Inc. + * Copyright (C) 2014,2016 Advanced Micro Devices, Inc. 
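ccp-platform.c, whose changes follow, performs the same binding for firmware-described devices: the ccp_vdata comes from the OF match table when a device-tree node is present and from the ACPI match table otherwise, and probing fails if neither yields data. A minimal sketch of that selection logic, using plain functions as stand-ins for of_match_node() and acpi_match_device():

#include <stdio.h>

struct version_data { unsigned int version; };

static const struct version_data ccpv3 = { 0x30 };   /* assumed value */

/* Stand-in for the OF match: returns data only when a DT node exists. */
static const struct version_data *get_of_version(int have_of_node)
{
	return have_of_node ? &ccpv3 : NULL;
}

/* Stand-in for the ACPI match. */
static const struct version_data *get_acpi_version(int have_acpi)
{
	return have_acpi ? &ccpv3 : NULL;
}

static int platform_probe(int have_of_node, int have_acpi)
{
	const struct version_data *vdata;

	vdata = have_of_node ? get_of_version(have_of_node)
			     : get_acpi_version(have_acpi);
	if (!vdata || !vdata->version) {
		fprintf(stderr, "missing driver data\n");
		return -1;
	}
	printf("probed, version 0x%x\n", vdata->version);
	return 0;
}

int main(void)
{
	platform_probe(1, 0);   /* device-tree system */
	platform_probe(0, 1);   /* ACPI system */
	platform_probe(0, 0);   /* neither: probe fails */
	return 0;
}

Both firmware tables now carry a pointer to ccpv3 as their match data, so the DT and ACPI paths converge on the same version-specific action table.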
* * Author: Tom Lendacky <thomas.lendacky@amd.com> * @@ -32,6 +32,33 @@ struct ccp_platform { int coherent; }; +static const struct acpi_device_id ccp_acpi_match[]; +static const struct of_device_id ccp_of_match[]; + +static struct ccp_vdata *ccp_get_of_version(struct platform_device *pdev) +{ +#ifdef CONFIG_OF + const struct of_device_id *match; + + match = of_match_node(ccp_of_match, pdev->dev.of_node); + if (match && match->data) + return (struct ccp_vdata *)match->data; +#endif + return 0; +} + +static struct ccp_vdata *ccp_get_acpi_version(struct platform_device *pdev) +{ +#ifdef CONFIG_ACPI + const struct acpi_device_id *match; + + match = acpi_match_device(ccp_acpi_match, &pdev->dev); + if (match && match->driver_data) + return (struct ccp_vdata *)match->driver_data; +#endif + return 0; +} + static int ccp_get_irq(struct ccp_device *ccp) { struct device *dev = ccp->dev; @@ -43,7 +70,8 @@ static int ccp_get_irq(struct ccp_device *ccp) return ret; ccp->irq = ret; - ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev); + ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0, + ccp->name, dev); if (ret) { dev_notice(dev, "unable to allocate IRQ (%d)\n", ret); return ret; @@ -106,6 +134,13 @@ static int ccp_platform_probe(struct platform_device *pdev) goto e_err; ccp->dev_specific = ccp_platform; + ccp->vdata = pdev->dev.of_node ? ccp_get_of_version(pdev) + : ccp_get_acpi_version(pdev); + if (!ccp->vdata || !ccp->vdata->version) { + ret = -ENODEV; + dev_err(dev, "missing driver data\n"); + goto e_err; + } ccp->get_irq = ccp_get_irqs; ccp->free_irq = ccp_free_irqs; @@ -137,7 +172,7 @@ static int ccp_platform_probe(struct platform_device *pdev) dev_set_drvdata(dev, ccp); - ret = ccp_init(ccp); + ret = ccp->vdata->perform->init(ccp); if (ret) goto e_err; @@ -155,7 +190,7 @@ static int ccp_platform_remove(struct platform_device *pdev) struct device *dev = &pdev->dev; struct ccp_device *ccp = dev_get_drvdata(dev); - ccp_destroy(ccp); + ccp->vdata->perform->destroy(ccp); dev_notice(dev, "disabled\n"); @@ -214,7 +249,7 @@ static int ccp_platform_resume(struct platform_device *pdev) #ifdef CONFIG_ACPI static const struct acpi_device_id ccp_acpi_match[] = { - { "AMDI0C00", 0 }, + { "AMDI0C00", (kernel_ulong_t)&ccpv3 }, { }, }; MODULE_DEVICE_TABLE(acpi, ccp_acpi_match); @@ -222,7 +257,8 @@ MODULE_DEVICE_TABLE(acpi, ccp_acpi_match); #ifdef CONFIG_OF static const struct of_device_id ccp_of_match[] = { - { .compatible = "amd,ccp-seattle-v1a" }, + { .compatible = "amd,ccp-seattle-v1a", + .data = (const void *)&ccpv3 }, { }, }; MODULE_DEVICE_TABLE(of, ccp_of_match); diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index e52496a172d0..2296934455fc 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -1031,6 +1031,18 @@ static int aead_perform(struct aead_request *req, int encrypt, BUG_ON(ivsize && !req->iv); memcpy(crypt->iv, req->iv, ivsize); + buf = chainup_buffers(dev, req->src, crypt->auth_len, + &src_hook, flags, src_direction); + req_ctx->src = src_hook.next; + crypt->src_buf = src_hook.phys_next; + if (!buf) + goto free_buf_src; + + lastlen = buf->buf_len; + if (lastlen >= authsize) + crypt->icv_rev_aes = buf->phys_addr + + buf->buf_len - authsize; + req_ctx->dst = NULL; if (req->src != req->dst) { @@ -1055,20 +1067,6 @@ static int aead_perform(struct aead_request *req, int encrypt, } } - buf = chainup_buffers(dev, req->src, crypt->auth_len, - &src_hook, flags, src_direction); - req_ctx->src = src_hook.next; - 
crypt->src_buf = src_hook.phys_next; - if (!buf) - goto free_buf_src; - - if (!encrypt || !req_ctx->dst) { - lastlen = buf->buf_len; - if (lastlen >= authsize) - crypt->icv_rev_aes = buf->phys_addr + - buf->buf_len - authsize; - } - if (unlikely(lastlen < authsize)) { /* The 12 hmac bytes are scattered, * we need to copy them into a safe buffer */ diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index dd355bd19474..d420ec751c7c 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -36,6 +36,7 @@ #include <linux/interrupt.h> #include <crypto/scatterwalk.h> #include <crypto/aes.h> +#include <crypto/algapi.h> #define DST_MAXBURST 4 #define DMA_MIN (DST_MAXBURST * sizeof(u32)) @@ -152,13 +153,10 @@ struct omap_aes_dev { unsigned long flags; int err; - spinlock_t lock; - struct crypto_queue queue; - struct tasklet_struct done_task; - struct tasklet_struct queue_task; struct ablkcipher_request *req; + struct crypto_engine *engine; /* * total is used by PIO mode for book keeping so introduce @@ -532,9 +530,7 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err) pr_debug("err: %d\n", err); - dd->flags &= ~FLAGS_BUSY; - - req->base.complete(&req->base, err); + crypto_finalize_request(dd->engine, req, err); } static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) @@ -604,34 +600,25 @@ static int omap_aes_copy_sgs(struct omap_aes_dev *dd) } static int omap_aes_handle_queue(struct omap_aes_dev *dd, - struct ablkcipher_request *req) + struct ablkcipher_request *req) { - struct crypto_async_request *async_req, *backlog; - struct omap_aes_ctx *ctx; - struct omap_aes_reqctx *rctx; - unsigned long flags; - int err, ret = 0, len; - - spin_lock_irqsave(&dd->lock, flags); if (req) - ret = ablkcipher_enqueue_request(&dd->queue, req); - if (dd->flags & FLAGS_BUSY) { - spin_unlock_irqrestore(&dd->lock, flags); - return ret; - } - backlog = crypto_get_backlog(&dd->queue); - async_req = crypto_dequeue_request(&dd->queue); - if (async_req) - dd->flags |= FLAGS_BUSY; - spin_unlock_irqrestore(&dd->lock, flags); + return crypto_transfer_request_to_engine(dd->engine, req); - if (!async_req) - return ret; + return 0; +} - if (backlog) - backlog->complete(backlog, -EINPROGRESS); +static int omap_aes_prepare_req(struct crypto_engine *engine, + struct ablkcipher_request *req) +{ + struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx( + crypto_ablkcipher_reqtfm(req)); + struct omap_aes_dev *dd = omap_aes_find_dev(ctx); + struct omap_aes_reqctx *rctx; + int len; - req = ablkcipher_request_cast(async_req); + if (!dd) + return -ENODEV; /* assign new request to device */ dd->req = req; @@ -662,16 +649,20 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd, dd->ctx = ctx; ctx->dd = dd; - err = omap_aes_write_ctrl(dd); - if (!err) - err = omap_aes_crypt_dma_start(dd); - if (err) { - /* aes_task will not finish it, so do it here */ - omap_aes_finish_req(dd, err); - tasklet_schedule(&dd->queue_task); - } + return omap_aes_write_ctrl(dd); +} - return ret; /* return ret, which is enqueue return value */ +static int omap_aes_crypt_req(struct crypto_engine *engine, + struct ablkcipher_request *req) +{ + struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx( + crypto_ablkcipher_reqtfm(req)); + struct omap_aes_dev *dd = omap_aes_find_dev(ctx); + + if (!dd) + return -ENODEV; + + return omap_aes_crypt_dma_start(dd); } static void omap_aes_done_task(unsigned long data) @@ -704,18 +695,10 @@ static void omap_aes_done_task(unsigned long data) } omap_aes_finish_req(dd, 0); - 
omap_aes_handle_queue(dd, NULL); pr_debug("exit\n"); } -static void omap_aes_queue_task(unsigned long data) -{ - struct omap_aes_dev *dd = (struct omap_aes_dev *)data; - - omap_aes_handle_queue(dd, NULL); -} - static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode) { struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx( @@ -1175,9 +1158,6 @@ static int omap_aes_probe(struct platform_device *pdev) dd->dev = dev; platform_set_drvdata(pdev, dd); - spin_lock_init(&dd->lock); - crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH); - err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) : omap_aes_get_res_pdev(dd, pdev, &res); if (err) @@ -1209,7 +1189,6 @@ static int omap_aes_probe(struct platform_device *pdev) (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift); tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd); - tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd); err = omap_aes_dma_init(dd); if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) { @@ -1250,7 +1229,20 @@ static int omap_aes_probe(struct platform_device *pdev) } } + /* Initialize crypto engine */ + dd->engine = crypto_engine_alloc_init(dev, 1); + if (!dd->engine) + goto err_algs; + + dd->engine->prepare_request = omap_aes_prepare_req; + dd->engine->crypt_one_request = omap_aes_crypt_req; + err = crypto_engine_start(dd->engine); + if (err) + goto err_engine; + return 0; +err_engine: + crypto_engine_exit(dd->engine); err_algs: for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) @@ -1260,7 +1252,6 @@ err_algs: omap_aes_dma_cleanup(dd); err_irq: tasklet_kill(&dd->done_task); - tasklet_kill(&dd->queue_task); pm_runtime_disable(dev); err_res: dd = NULL; @@ -1286,8 +1277,8 @@ static int omap_aes_remove(struct platform_device *pdev) crypto_unregister_alg( &dd->pdata->algs_info[i].algs_list[j]); + crypto_engine_exit(dd->engine); tasklet_kill(&dd->done_task); - tasklet_kill(&dd->queue_task); omap_aes_dma_cleanup(dd); pm_runtime_disable(dd->dev); dd = NULL; diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h index f96d427e502c..5a07208ce778 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h @@ -55,8 +55,8 @@ #define ADF_DH895XCC_DEVICE_NAME "dh895xcc" #define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf" -#define ADF_C62X_DEVICE_NAME "c62x" -#define ADF_C62XVF_DEVICE_NAME "c62xvf" +#define ADF_C62X_DEVICE_NAME "c6xx" +#define ADF_C62XVF_DEVICE_NAME "c6xxvf" #define ADF_C3XXX_DEVICE_NAME "c3xxx" #define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf" #define ADF_DH895XCC_PCI_DEVICE_ID 0x435 diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c index e78a1d7d88fc..b40d9c8dad96 100644 --- a/drivers/crypto/qat/qat_common/adf_aer.c +++ b/drivers/crypto/qat/qat_common/adf_aer.c @@ -121,7 +121,6 @@ static void adf_device_reset_worker(struct work_struct *work) adf_dev_restarting_notify(accel_dev); adf_dev_stop(accel_dev); adf_dev_shutdown(accel_dev); - adf_dev_restore(accel_dev); if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) { /* The device hanged and we can't restart it so stop here */ dev_err(&GET_DEV(accel_dev), "Restart device failed\n"); diff --git a/drivers/crypto/qat/qat_common/adf_cfg_user.h b/drivers/crypto/qat/qat_common/adf_cfg_user.h index ef5988afd4c6..b5484bfa6996 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg_user.h +++ 
b/drivers/crypto/qat/qat_common/adf_cfg_user.h @@ -58,7 +58,7 @@ struct adf_user_cfg_key_val { uint64_t padding3; }; enum adf_cfg_val_type type; -}; +} __packed; struct adf_user_cfg_section { char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES]; @@ -70,7 +70,7 @@ struct adf_user_cfg_section { struct adf_user_cfg_section *next; uint64_t padding3; }; -}; +} __packed; struct adf_user_cfg_ctl_data { union { @@ -78,5 +78,5 @@ struct adf_user_cfg_ctl_data { uint64_t padding; }; uint8_t device_id; -}; +} __packed; #endif diff --git a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c index f267d9e42e0b..d7dd18d9bef8 100644 --- a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c +++ b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c @@ -49,7 +49,6 @@ #include "adf_transport_internal.h" #define ADF_ARB_NUM 4 -#define ADF_ARB_REQ_RING_NUM 8 #define ADF_ARB_REG_SIZE 0x4 #define ADF_ARB_WTR_SIZE 0x20 #define ADF_ARB_OFFSET 0x30000 @@ -64,15 +63,6 @@ ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \ (ADF_ARB_REG_SLOT * index), value) -#define WRITE_CSR_ARB_RESPORDERING(csr_addr, index, value) \ - ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \ - ADF_ARB_RO_EN_OFFSET) + (ADF_ARB_REG_SIZE * index), value) - -#define WRITE_CSR_ARB_WEIGHT(csr_addr, arb, index, value) \ - ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \ - ADF_ARB_WTR_OFFSET) + (ADF_ARB_WTR_SIZE * arb) + \ - (ADF_ARB_REG_SIZE * index), value) - #define WRITE_CSR_ARB_SARCONFIG(csr_addr, index, value) \ ADF_CSR_WR(csr_addr, ADF_ARB_OFFSET + \ (ADF_ARB_REG_SIZE * index), value) @@ -99,15 +89,6 @@ int adf_init_arb(struct adf_accel_dev *accel_dev) for (arb = 0; arb < ADF_ARB_NUM; arb++) WRITE_CSR_ARB_SARCONFIG(csr, arb, arb_cfg); - /* Setup service weighting */ - for (arb = 0; arb < ADF_ARB_NUM; arb++) - for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++) - WRITE_CSR_ARB_WEIGHT(csr, arb, i, 0xFFFFFFFF); - - /* Setup ring response ordering */ - for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++) - WRITE_CSR_ARB_RESPORDERING(csr, i, 0xFFFFFFFF); - /* Setup worker queue registers */ for (i = 0; i < hw_data->num_engines; i++) WRITE_CSR_ARB_WQCFG(csr, i, i); diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h index d97db990955d..5d1ee7e53492 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_uclo.h +++ b/drivers/crypto/qat/qat_common/icp_qat_uclo.h @@ -112,27 +112,27 @@ enum icp_qat_uof_mem_region { }; enum icp_qat_uof_regtype { - ICP_NO_DEST, - ICP_GPA_REL, - ICP_GPA_ABS, - ICP_GPB_REL, - ICP_GPB_ABS, - ICP_SR_REL, - ICP_SR_RD_REL, - ICP_SR_WR_REL, - ICP_SR_ABS, - ICP_SR_RD_ABS, - ICP_SR_WR_ABS, - ICP_DR_REL, - ICP_DR_RD_REL, - ICP_DR_WR_REL, - ICP_DR_ABS, - ICP_DR_RD_ABS, - ICP_DR_WR_ABS, - ICP_LMEM, - ICP_LMEM0, - ICP_LMEM1, - ICP_NEIGH_REL, + ICP_NO_DEST = 0, + ICP_GPA_REL = 1, + ICP_GPA_ABS = 2, + ICP_GPB_REL = 3, + ICP_GPB_ABS = 4, + ICP_SR_REL = 5, + ICP_SR_RD_REL = 6, + ICP_SR_WR_REL = 7, + ICP_SR_ABS = 8, + ICP_SR_RD_ABS = 9, + ICP_SR_WR_ABS = 10, + ICP_DR_REL = 19, + ICP_DR_RD_REL = 20, + ICP_DR_WR_REL = 21, + ICP_DR_ABS = 22, + ICP_DR_RD_ABS = 23, + ICP_DR_WR_ABS = 24, + ICP_LMEM = 26, + ICP_LMEM0 = 27, + ICP_LMEM1 = 28, + ICP_NEIGH_REL = 31, }; enum icp_qat_css_fwtype { diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 59e4c3af15ed..1e8852a8a057 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -1064,8 +1064,7 @@ static int qat_alg_aead_init(struct crypto_aead *tfm, if 
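/*
 * What the two qat changes above buy, in miniature (sketch; my_* names
 * hypothetical): __packed pins a user-visible struct layout so 32-bit
 * compat ioctl callers and a 64-bit kernel agree on offsets, and spelling
 * out enum values pins an encoding shared with firmware so future
 * insertions cannot silently renumber it.
 */
#include <linux/types.h>

struct my_cfg_key_val {
	union {
		char *ptr;
		u64 padding;		/* same width on 32- and 64-bit */
	};
	u8 type;
} __packed;				/* no tail padding: sizeof() == 9 everywhere */

enum my_regtype {
	MY_SR_WR_ABS = 10,
	MY_DR_REL    = 19,	/* 11..18 are reserved encodings, so the */
	MY_DR_RD_REL = 20,	/* gap must be explicit, never implied   */
};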
(IS_ERR(ctx->hash_tfm)) return PTR_ERR(ctx->hash_tfm); ctx->qat_hash_alg = hash; - crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) + - sizeof(struct qat_crypto_request)); + crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request)); return 0; } @@ -1114,8 +1113,7 @@ static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm) struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); spin_lock_init(&ctx->lock); - tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) + - sizeof(struct qat_crypto_request); + tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request); ctx->tfm = tfm; return 0; } diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c index 51c594fdacdc..e5c0727d4876 100644 --- a/drivers/crypto/qat/qat_common/qat_asym_algs.c +++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c @@ -340,14 +340,16 @@ static int qat_rsa_enc(struct akcipher_request *req) if (!ret) return -EINPROGRESS; -unmap_src: - if (qat_req->src_align) - dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, - qat_req->in.enc.m); - else - if (!dma_mapping_error(dev, qat_req->in.enc.m)) - dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz, - DMA_TO_DEVICE); + + if (!dma_mapping_error(dev, qat_req->phy_out)) + dma_unmap_single(dev, qat_req->phy_out, + sizeof(struct qat_rsa_output_params), + DMA_TO_DEVICE); +unmap_in_params: + if (!dma_mapping_error(dev, qat_req->phy_in)) + dma_unmap_single(dev, qat_req->phy_in, + sizeof(struct qat_rsa_input_params), + DMA_TO_DEVICE); unmap_dst: if (qat_req->dst_align) dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align, @@ -356,15 +358,14 @@ unmap_dst: if (!dma_mapping_error(dev, qat_req->out.enc.c)) dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz, DMA_FROM_DEVICE); -unmap_in_params: - if (!dma_mapping_error(dev, qat_req->phy_in)) - dma_unmap_single(dev, qat_req->phy_in, - sizeof(struct qat_rsa_input_params), - DMA_TO_DEVICE); - if (!dma_mapping_error(dev, qat_req->phy_out)) - dma_unmap_single(dev, qat_req->phy_out, - sizeof(struct qat_rsa_output_params), - DMA_TO_DEVICE); +unmap_src: + if (qat_req->src_align) + dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, + qat_req->in.enc.m); + else + if (!dma_mapping_error(dev, qat_req->in.enc.m)) + dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz, + DMA_TO_DEVICE); return ret; } @@ -472,14 +473,16 @@ static int qat_rsa_dec(struct akcipher_request *req) if (!ret) return -EINPROGRESS; -unmap_src: - if (qat_req->src_align) - dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, - qat_req->in.dec.c); - else - if (!dma_mapping_error(dev, qat_req->in.dec.c)) - dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz, - DMA_TO_DEVICE); + + if (!dma_mapping_error(dev, qat_req->phy_out)) + dma_unmap_single(dev, qat_req->phy_out, + sizeof(struct qat_rsa_output_params), + DMA_TO_DEVICE); +unmap_in_params: + if (!dma_mapping_error(dev, qat_req->phy_in)) + dma_unmap_single(dev, qat_req->phy_in, + sizeof(struct qat_rsa_input_params), + DMA_TO_DEVICE); unmap_dst: if (qat_req->dst_align) dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align, @@ -488,15 +491,14 @@ unmap_dst: if (!dma_mapping_error(dev, qat_req->out.dec.m)) dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz, DMA_FROM_DEVICE); -unmap_in_params: - if (!dma_mapping_error(dev, qat_req->phy_in)) - dma_unmap_single(dev, qat_req->phy_in, - sizeof(struct qat_rsa_input_params), - DMA_TO_DEVICE); - if (!dma_mapping_error(dev, qat_req->phy_out)) - dma_unmap_single(dev, qat_req->phy_out, - 
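/*
 * The qat_rsa_enc()/qat_rsa_dec() relabelling above restores the usual
 * goto-unwind discipline: release resources in the reverse order of
 * acquisition, so a failure at step N lands on the label that undoes
 * steps N-1 back to 1. Minimal shape (sketch; my_submit() hypothetical):
 */
#include <linux/dma-mapping.h>

static int my_op(struct device *dev, void *src, void *dst, size_t len)
{
	dma_addr_t in, out;

	in = dma_map_single(dev, src, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, in))
		return -ENOMEM;

	out = dma_map_single(dev, dst, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, out))
		goto unmap_in;

	if (!my_submit(in, out))	/* hypothetical; 0 = accepted */
		return -EINPROGRESS;	/* completion handler unmaps both */

	dma_unmap_single(dev, out, len, DMA_FROM_DEVICE);
unmap_in:
	dma_unmap_single(dev, in, len, DMA_TO_DEVICE);
	return -ENOMEM;
}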
sizeof(struct qat_rsa_output_params), - DMA_TO_DEVICE); +unmap_src: + if (qat_req->src_align) + dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, + qat_req->in.dec.c); + else + if (!dma_mapping_error(dev, qat_req->in.dec.c)) + dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz, + DMA_TO_DEVICE); return ret; } diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index 25d15f19c2b3..9b961b37a282 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -688,7 +688,7 @@ static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae) int mflag = 0; struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; - for (ae = 0; ae <= max_ae; ae++) { + for (ae = 0; ae < max_ae; ae++) { if (!test_bit(ae, (unsigned long *)&handle->hal_handle->ae_mask)) continue; diff --git a/drivers/crypto/rockchip/Makefile b/drivers/crypto/rockchip/Makefile index 7051c6c715f3..30f91297b4b6 100644 --- a/drivers/crypto/rockchip/Makefile +++ b/drivers/crypto/rockchip/Makefile @@ -1,3 +1,4 @@ obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rk_crypto.o rk_crypto-objs := rk3288_crypto.o \ rk3288_crypto_ablkcipher.o \ + rk3288_crypto_ahash.o diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c index da9c73dce4af..af508258d2ea 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.c +++ b/drivers/crypto/rockchip/rk3288_crypto.c @@ -208,6 +208,8 @@ static void rk_crypto_tasklet_cb(unsigned long data) if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_ABLKCIPHER) dev->ablk_req = ablkcipher_request_cast(async_req); + else + dev->ahash_req = ahash_request_cast(async_req); err = dev->start(dev); if (err) dev->complete(dev, err); @@ -220,6 +222,9 @@ static struct rk_crypto_tmp *rk_cipher_algs[] = { &rk_cbc_des_alg, &rk_ecb_des3_ede_alg, &rk_cbc_des3_ede_alg, + &rk_ahash_sha1, + &rk_ahash_sha256, + &rk_ahash_md5, }; static int rk_crypto_register(struct rk_crypto_info *crypto_info) @@ -229,15 +234,24 @@ static int rk_crypto_register(struct rk_crypto_info *crypto_info) for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) { rk_cipher_algs[i]->dev = crypto_info; - err = crypto_register_alg(&rk_cipher_algs[i]->alg); + if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER) + err = crypto_register_alg( + &rk_cipher_algs[i]->alg.crypto); + else + err = crypto_register_ahash( + &rk_cipher_algs[i]->alg.hash); if (err) goto err_cipher_algs; } return 0; err_cipher_algs: - for (k = 0; k < i; k++) - crypto_unregister_alg(&rk_cipher_algs[k]->alg); + for (k = 0; k < i; k++) { + if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER) + crypto_unregister_alg(&rk_cipher_algs[k]->alg.crypto); + else + crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash); + } return err; } @@ -245,8 +259,12 @@ static void rk_crypto_unregister(void) { unsigned int i; - for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) - crypto_unregister_alg(&rk_cipher_algs[i]->alg); + for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) { + if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER) + crypto_unregister_alg(&rk_cipher_algs[i]->alg.crypto); + else + crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash); + } } static void rk_crypto_action(void *data) diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h index e499c2c6c903..d7b71fea320b 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.h +++ b/drivers/crypto/rockchip/rk3288_crypto.h @@ -6,6 +6,10 @@ #include <crypto/algapi.h> #include <linux/interrupt.h> #include 
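/*
 * Shape of the mixed cipher/hash registration that rk3288_crypto.c moves
 * to above (sketch; struct my_tmp and my_algs are hypothetical
 * counterparts of rk_crypto_tmp and rk_cipher_algs). The point of the
 * unwind loop is that the type test and the unregister call must key off
 * the same index:
 */
#include <crypto/internal/hash.h>	/* crypto_register_ahash() */

struct my_tmp {
	enum alg_type type;		/* ALG_TYPE_CIPHER or ALG_TYPE_HASH */
	union {
		struct crypto_alg crypto;
		struct ahash_alg hash;
	} alg;
};
static struct my_tmp *my_algs[6];	/* hypothetical table */

static int my_register_algs(void)
{
	unsigned int i, k;
	int err;

	for (i = 0; i < ARRAY_SIZE(my_algs); i++) {
		if (my_algs[i]->type == ALG_TYPE_CIPHER)
			err = crypto_register_alg(&my_algs[i]->alg.crypto);
		else
			err = crypto_register_ahash(&my_algs[i]->alg.hash);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	for (k = 0; k < i; k++) {
		if (my_algs[k]->type == ALG_TYPE_CIPHER)
			crypto_unregister_alg(&my_algs[k]->alg.crypto);
		else
			crypto_unregister_ahash(&my_algs[k]->alg.hash);
	}
	return err;
}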
<linux/delay.h> +#include <crypto/internal/hash.h> + +#include <crypto/md5.h> +#include <crypto/sha.h> #define _SBF(v, f) ((v) << (f)) @@ -149,6 +153,28 @@ #define RK_CRYPTO_TDES_KEY3_0 0x0130 #define RK_CRYPTO_TDES_KEY3_1 0x0134 +/* HASH */ +#define RK_CRYPTO_HASH_CTRL 0x0180 +#define RK_CRYPTO_HASH_SWAP_DO BIT(3) +#define RK_CRYPTO_HASH_SWAP_DI BIT(2) +#define RK_CRYPTO_HASH_SHA1 _SBF(0x00, 0) +#define RK_CRYPTO_HASH_MD5 _SBF(0x01, 0) +#define RK_CRYPTO_HASH_SHA256 _SBF(0x02, 0) +#define RK_CRYPTO_HASH_PRNG _SBF(0x03, 0) + +#define RK_CRYPTO_HASH_STS 0x0184 +#define RK_CRYPTO_HASH_DONE BIT(0) + +#define RK_CRYPTO_HASH_MSG_LEN 0x0188 +#define RK_CRYPTO_HASH_DOUT_0 0x018c +#define RK_CRYPTO_HASH_DOUT_1 0x0190 +#define RK_CRYPTO_HASH_DOUT_2 0x0194 +#define RK_CRYPTO_HASH_DOUT_3 0x0198 +#define RK_CRYPTO_HASH_DOUT_4 0x019c +#define RK_CRYPTO_HASH_DOUT_5 0x01a0 +#define RK_CRYPTO_HASH_DOUT_6 0x01a4 +#define RK_CRYPTO_HASH_DOUT_7 0x01a8 + #define CRYPTO_READ(dev, offset) \ readl_relaxed(((dev)->reg + (offset))) #define CRYPTO_WRITE(dev, offset, val) \ @@ -166,6 +192,7 @@ struct rk_crypto_info { struct crypto_queue queue; struct tasklet_struct crypto_tasklet; struct ablkcipher_request *ablk_req; + struct ahash_request *ahash_req; /* device lock */ spinlock_t lock; @@ -195,15 +222,36 @@ struct rk_crypto_info { void (*unload_data)(struct rk_crypto_info *dev); }; +/* the private variable of hash */ +struct rk_ahash_ctx { + struct rk_crypto_info *dev; + /* for fallback */ + struct crypto_ahash *fallback_tfm; +}; + +/* the privete variable of hash for fallback */ +struct rk_ahash_rctx { + struct ahash_request fallback_req; +}; + /* the private variable of cipher */ struct rk_cipher_ctx { struct rk_crypto_info *dev; unsigned int keylen; }; +enum alg_type { + ALG_TYPE_HASH, + ALG_TYPE_CIPHER, +}; + struct rk_crypto_tmp { - struct rk_crypto_info *dev; - struct crypto_alg alg; + struct rk_crypto_info *dev; + union { + struct crypto_alg crypto; + struct ahash_alg hash; + } alg; + enum alg_type type; }; extern struct rk_crypto_tmp rk_ecb_aes_alg; @@ -213,4 +261,8 @@ extern struct rk_crypto_tmp rk_cbc_des_alg; extern struct rk_crypto_tmp rk_ecb_des3_ede_alg; extern struct rk_crypto_tmp rk_cbc_des3_ede_alg; +extern struct rk_crypto_tmp rk_ahash_sha1; +extern struct rk_crypto_tmp rk_ahash_sha256; +extern struct rk_crypto_tmp rk_ahash_md5; + #endif diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c index d98b681f6c06..b5a3afe222e4 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c +++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c @@ -336,7 +336,7 @@ static int rk_ablk_cra_init(struct crypto_tfm *tfm) struct crypto_alg *alg = tfm->__crt_alg; struct rk_crypto_tmp *algt; - algt = container_of(alg, struct rk_crypto_tmp, alg); + algt = container_of(alg, struct rk_crypto_tmp, alg.crypto); ctx->dev = algt->dev; ctx->dev->align_size = crypto_tfm_alg_alignmask(tfm) + 1; @@ -357,7 +357,8 @@ static void rk_ablk_cra_exit(struct crypto_tfm *tfm) } struct rk_crypto_tmp rk_ecb_aes_alg = { - .alg = { + .type = ALG_TYPE_CIPHER, + .alg.crypto = { .cra_name = "ecb(aes)", .cra_driver_name = "ecb-aes-rk", .cra_priority = 300, @@ -381,7 +382,8 @@ struct rk_crypto_tmp rk_ecb_aes_alg = { }; struct rk_crypto_tmp rk_cbc_aes_alg = { - .alg = { + .type = ALG_TYPE_CIPHER, + .alg.crypto = { .cra_name = "cbc(aes)", .cra_driver_name = "cbc-aes-rk", .cra_priority = 300, @@ -406,7 +408,8 @@ struct rk_crypto_tmp rk_cbc_aes_alg = { }; struct rk_crypto_tmp 
rk_ecb_des_alg = { - .alg = { + .type = ALG_TYPE_CIPHER, + .alg.crypto = { .cra_name = "ecb(des)", .cra_driver_name = "ecb-des-rk", .cra_priority = 300, @@ -430,7 +433,8 @@ struct rk_crypto_tmp rk_ecb_des_alg = { }; struct rk_crypto_tmp rk_cbc_des_alg = { - .alg = { + .type = ALG_TYPE_CIPHER, + .alg.crypto = { .cra_name = "cbc(des)", .cra_driver_name = "cbc-des-rk", .cra_priority = 300, @@ -455,7 +459,8 @@ struct rk_crypto_tmp rk_cbc_des_alg = { }; struct rk_crypto_tmp rk_ecb_des3_ede_alg = { - .alg = { + .type = ALG_TYPE_CIPHER, + .alg.crypto = { .cra_name = "ecb(des3_ede)", .cra_driver_name = "ecb-des3-ede-rk", .cra_priority = 300, @@ -480,7 +485,8 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = { }; struct rk_crypto_tmp rk_cbc_des3_ede_alg = { - .alg = { + .type = ALG_TYPE_CIPHER, + .alg.crypto = { .cra_name = "cbc(des3_ede)", .cra_driver_name = "cbc-des3-ede-rk", .cra_priority = 300, diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c new file mode 100644 index 000000000000..718588219f75 --- /dev/null +++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c @@ -0,0 +1,404 @@ +/* + * Crypto acceleration support for Rockchip RK3288 + * + * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd + * + * Author: Zain Wang <zain.wang@rock-chips.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * Some ideas are from marvell/cesa.c and s5p-sss.c driver. + */ +#include "rk3288_crypto.h" + +/* + * IC can not process zero message hash, + * so we put the fixed hash out when met zero message. + */ + +static int zero_message_process(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + int rk_digest_size = crypto_ahash_digestsize(tfm); + + switch (rk_digest_size) { + case SHA1_DIGEST_SIZE: + memcpy(req->result, sha1_zero_message_hash, rk_digest_size); + break; + case SHA256_DIGEST_SIZE: + memcpy(req->result, sha256_zero_message_hash, rk_digest_size); + break; + case MD5_DIGEST_SIZE: + memcpy(req->result, md5_zero_message_hash, rk_digest_size); + break; + default: + return -EINVAL; + } + + return 0; +} + +static void rk_ahash_crypto_complete(struct rk_crypto_info *dev, int err) +{ + if (dev->ahash_req->base.complete) + dev->ahash_req->base.complete(&dev->ahash_req->base, err); +} + +static void rk_ahash_reg_init(struct rk_crypto_info *dev) +{ + int reg_status = 0; + + reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) | + RK_CRYPTO_HASH_FLUSH | _SBF(0xffff, 16); + CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status); + + reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL); + reg_status &= (~RK_CRYPTO_HASH_FLUSH); + reg_status |= _SBF(0xffff, 16); + CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status); + + memset_io(dev->reg + RK_CRYPTO_HASH_DOUT_0, 0, 32); + + CRYPTO_WRITE(dev, RK_CRYPTO_INTENA, RK_CRYPTO_HRDMA_ERR_ENA | + RK_CRYPTO_HRDMA_DONE_ENA); + + CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, RK_CRYPTO_HRDMA_ERR_INT | + RK_CRYPTO_HRDMA_DONE_INT); + + CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, dev->mode | + RK_CRYPTO_HASH_SWAP_DO); + + CRYPTO_WRITE(dev, RK_CRYPTO_CONF, RK_CRYPTO_BYTESWAP_HRFIFO | + RK_CRYPTO_BYTESWAP_BRFIFO | + RK_CRYPTO_BYTESWAP_BTFIFO); + + CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, dev->total); +} + +static int rk_ahash_init(struct ahash_request *req) +{ + struct rk_ahash_rctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + 
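/*
 * The zero-message shortcut opening rk3288_crypto_ahash.c above leans on
 * the precomputed digests exported by <crypto/sha.h> and <crypto/md5.h>;
 * any driver whose engine cannot hash an empty message can take the same
 * early exit (sketch; my_hw_digest() hypothetical):
 */
#include <crypto/sha.h>

static int my_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (!req->nbytes && crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE) {
		memcpy(req->result, sha1_zero_message_hash, SHA1_DIGEST_SIZE);
		return 0;
	}
	return my_hw_digest(req);	/* hypothetical hardware path */
}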
struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); + rctx->fallback_req.base.flags = req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_ahash_init(&rctx->fallback_req); +} + +static int rk_ahash_update(struct ahash_request *req) +{ + struct rk_ahash_rctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); + rctx->fallback_req.base.flags = req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + rctx->fallback_req.nbytes = req->nbytes; + rctx->fallback_req.src = req->src; + + return crypto_ahash_update(&rctx->fallback_req); +} + +static int rk_ahash_final(struct ahash_request *req) +{ + struct rk_ahash_rctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); + rctx->fallback_req.base.flags = req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + rctx->fallback_req.result = req->result; + + return crypto_ahash_final(&rctx->fallback_req); +} + +static int rk_ahash_finup(struct ahash_request *req) +{ + struct rk_ahash_rctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); + rctx->fallback_req.base.flags = req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + + rctx->fallback_req.nbytes = req->nbytes; + rctx->fallback_req.src = req->src; + rctx->fallback_req.result = req->result; + + return crypto_ahash_finup(&rctx->fallback_req); +} + +static int rk_ahash_import(struct ahash_request *req, const void *in) +{ + struct rk_ahash_rctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); + rctx->fallback_req.base.flags = req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_ahash_import(&rctx->fallback_req, in); +} + +static int rk_ahash_export(struct ahash_request *req, void *out) +{ + struct rk_ahash_rctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); + rctx->fallback_req.base.flags = req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_ahash_export(&rctx->fallback_req, out); +} + +static int rk_ahash_digest(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm); + struct rk_crypto_info *dev = NULL; + unsigned long flags; + int ret; + + if (!req->nbytes) + return zero_message_process(req); + + dev = tctx->dev; + dev->total = req->nbytes; + dev->left_bytes = req->nbytes; + dev->aligned = 0; + dev->mode = 0; + dev->align_size = 4; + dev->sg_dst = NULL; + dev->sg_src = req->src; + dev->first = req->src; + dev->nents = sg_nents(req->src); + + switch (crypto_ahash_digestsize(tfm)) { + case SHA1_DIGEST_SIZE: + dev->mode = RK_CRYPTO_HASH_SHA1; + break; + case SHA256_DIGEST_SIZE: + dev->mode = RK_CRYPTO_HASH_SHA256; + break; + case MD5_DIGEST_SIZE: + dev->mode = RK_CRYPTO_HASH_MD5; + break; + default: + return -EINVAL; + } + + rk_ahash_reg_init(dev); + + 
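/*
 * Every rk_ahash_* helper above instantiates one fallback template:
 * retarget the nested request at the software tfm, mirror only the
 * caller's MAY_SLEEP flag plus the relevant parameters, and delegate.
 * This works because rk_cra_hash_init() later sizes the request context
 * as sizeof(rctx) + crypto_ahash_reqsize(fallback), so the nested
 * request's own reqctx fits behind it. Condensed (sketch; my_* names
 * hypothetical):
 */
struct my_ctx {
	struct crypto_ahash *fallback_tfm;
};
struct my_rctx {
	struct ahash_request fallback_req;	/* must be last: variable reqctx follows */
};

static int my_update(struct ahash_request *req)
{
	struct my_rctx *rctx = ahash_request_ctx(req);
	struct my_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags =
		req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}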
spin_lock_irqsave(&dev->lock, flags); + ret = crypto_enqueue_request(&dev->queue, &req->base); + spin_unlock_irqrestore(&dev->lock, flags); + + tasklet_schedule(&dev->crypto_tasklet); + + /* + * it will take some time to process date after last dma transmission. + * + * waiting time is relative with the last date len, + * so cannot set a fixed time here. + * 10-50 makes system not call here frequently wasting + * efficiency, and make it response quickly when dma + * complete. + */ + while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS)) + usleep_range(10, 50); + + memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0, + crypto_ahash_digestsize(tfm)); + + return 0; +} + +static void crypto_ahash_dma_start(struct rk_crypto_info *dev) +{ + CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, dev->addr_in); + CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, (dev->count + 3) / 4); + CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START | + (RK_CRYPTO_HASH_START << 16)); +} + +static int rk_ahash_set_data_start(struct rk_crypto_info *dev) +{ + int err; + + err = dev->load_data(dev, dev->sg_src, NULL); + if (!err) + crypto_ahash_dma_start(dev); + return err; +} + +static int rk_ahash_start(struct rk_crypto_info *dev) +{ + return rk_ahash_set_data_start(dev); +} + +static int rk_ahash_crypto_rx(struct rk_crypto_info *dev) +{ + int err = 0; + + dev->unload_data(dev); + if (dev->left_bytes) { + if (dev->aligned) { + if (sg_is_last(dev->sg_src)) { + dev_warn(dev->dev, "[%s:%d], Lack of data\n", + __func__, __LINE__); + err = -ENOMEM; + goto out_rx; + } + dev->sg_src = sg_next(dev->sg_src); + } + err = rk_ahash_set_data_start(dev); + } else { + dev->complete(dev, 0); + } + +out_rx: + return err; +} + +static int rk_cra_hash_init(struct crypto_tfm *tfm) +{ + struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm); + struct rk_crypto_tmp *algt; + struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); + + const char *alg_name = crypto_tfm_alg_name(tfm); + + algt = container_of(alg, struct rk_crypto_tmp, alg.hash); + + tctx->dev = algt->dev; + tctx->dev->addr_vir = (void *)__get_free_page(GFP_KERNEL); + if (!tctx->dev->addr_vir) { + dev_err(tctx->dev->dev, "failed to kmalloc for addr_vir\n"); + return -ENOMEM; + } + tctx->dev->start = rk_ahash_start; + tctx->dev->update = rk_ahash_crypto_rx; + tctx->dev->complete = rk_ahash_crypto_complete; + + /* for fallback */ + tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(tctx->fallback_tfm)) { + dev_err(tctx->dev->dev, "Could not load fallback driver.\n"); + return PTR_ERR(tctx->fallback_tfm); + } + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct rk_ahash_rctx) + + crypto_ahash_reqsize(tctx->fallback_tfm)); + + return tctx->dev->enable_clk(tctx->dev); +} + +static void rk_cra_hash_exit(struct crypto_tfm *tfm) +{ + struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm); + + free_page((unsigned long)tctx->dev->addr_vir); + return tctx->dev->disable_clk(tctx->dev); +} + +struct rk_crypto_tmp rk_ahash_sha1 = { + .type = ALG_TYPE_HASH, + .alg.hash = { + .init = rk_ahash_init, + .update = rk_ahash_update, + .final = rk_ahash_final, + .finup = rk_ahash_finup, + .export = rk_ahash_export, + .import = rk_ahash_import, + .digest = rk_ahash_digest, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct sha1_state), + .base = { + .cra_name = "sha1", + .cra_driver_name = "rk-sha1", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct 
rk_ahash_ctx), + .cra_alignmask = 3, + .cra_init = rk_cra_hash_init, + .cra_exit = rk_cra_hash_exit, + .cra_module = THIS_MODULE, + } + } + } +}; + +struct rk_crypto_tmp rk_ahash_sha256 = { + .type = ALG_TYPE_HASH, + .alg.hash = { + .init = rk_ahash_init, + .update = rk_ahash_update, + .final = rk_ahash_final, + .finup = rk_ahash_finup, + .export = rk_ahash_export, + .import = rk_ahash_import, + .digest = rk_ahash_digest, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct sha256_state), + .base = { + .cra_name = "sha256", + .cra_driver_name = "rk-sha256", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct rk_ahash_ctx), + .cra_alignmask = 3, + .cra_init = rk_cra_hash_init, + .cra_exit = rk_cra_hash_exit, + .cra_module = THIS_MODULE, + } + } + } +}; + +struct rk_crypto_tmp rk_ahash_md5 = { + .type = ALG_TYPE_HASH, + .alg.hash = { + .init = rk_ahash_init, + .update = rk_ahash_update, + .final = rk_ahash_final, + .finup = rk_ahash_finup, + .export = rk_ahash_export, + .import = rk_ahash_import, + .digest = rk_ahash_digest, + .halg = { + .digestsize = MD5_DIGEST_SIZE, + .statesize = sizeof(struct md5_state), + .base = { + .cra_name = "md5", + .cra_driver_name = "rk-md5", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct rk_ahash_ctx), + .cra_alignmask = 3, + .cra_init = rk_cra_hash_init, + .cra_exit = rk_cra_hash_exit, + .cra_module = THIS_MODULE, + } + } + } +}; diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index f214a8755827..5f161a9777e3 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c @@ -224,6 +224,7 @@ static inline struct samsung_aes_variant *find_s5p_sss_version { if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) { const struct of_device_id *match; + match = of_match_node(s5p_sss_dt_match, pdev->dev.of_node); return (struct samsung_aes_variant *)match->data; @@ -382,7 +383,7 @@ static void s5p_set_aes(struct s5p_aes_dev *dev, void __iomem *keystart; if (iv) - memcpy(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10); + memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10); if (keylen == AES_KEYSIZE_256) keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0); @@ -391,13 +392,12 @@ static void s5p_set_aes(struct s5p_aes_dev *dev, else keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4); - memcpy(keystart, key, keylen); + memcpy_toio(keystart, key, keylen); } static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode) { struct ablkcipher_request *req = dev->req; - uint32_t aes_control; int err; unsigned long flags; @@ -518,7 +518,7 @@ static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode) struct s5p_aes_dev *dev = ctx->dev; if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) { - pr_err("request size is not exact amount of AES blocks\n"); + dev_err(dev->dev, "request size is not exact amount of AES blocks\n"); return -EINVAL; } @@ -566,7 +566,7 @@ static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req) static int s5p_aes_cra_init(struct crypto_tfm *tfm) { - struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm); + struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm); ctx->dev = s5p_dev; tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx); @@ -701,7 +701,7 @@ static int s5p_aes_probe(struct platform_device *pdev) goto err_algs; } - pr_info("s5p-sss driver registered\n"); + 
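/*
 * The s5p-sss fix above in isolation: the AES key and IV registers are
 * __iomem, so they must be loaded with memcpy_toio(), never plain
 * memcpy(), which is undefined for MMIO (sketch):
 */
#include <linux/io.h>
#include <crypto/aes.h>

static void my_set_iv(void __iomem *iv_regs, const u8 *iv)
{
	memcpy_toio(iv_regs, iv, AES_BLOCK_SIZE);
}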
dev_info(dev, "s5p-sss driver registered\n"); return 0; diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index 6c4f91c5e6b3..c3f3d89e4831 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -182,7 +182,6 @@ struct sahara_sha_reqctx { u8 buf[SAHARA_MAX_SHA_BLOCK_SIZE]; u8 rembuf[SAHARA_MAX_SHA_BLOCK_SIZE]; u8 context[SHA256_DIGEST_SIZE + 4]; - struct mutex mutex; unsigned int mode; unsigned int digest_size; unsigned int context_size; @@ -1096,7 +1095,6 @@ static int sahara_sha_enqueue(struct ahash_request *req, int last) if (!req->nbytes && !last) return 0; - mutex_lock(&rctx->mutex); rctx->last = last; if (!rctx->active) { @@ -1109,7 +1107,6 @@ static int sahara_sha_enqueue(struct ahash_request *req, int last) mutex_unlock(&dev->queue_mutex); wake_up_process(dev->kthread); - mutex_unlock(&rctx->mutex); return ret; } @@ -1137,8 +1134,6 @@ static int sahara_sha_init(struct ahash_request *req) rctx->context_size = rctx->digest_size + 4; rctx->active = 0; - mutex_init(&rctx->mutex); - return 0; } @@ -1167,26 +1162,18 @@ static int sahara_sha_digest(struct ahash_request *req) static int sahara_sha_export(struct ahash_request *req, void *out) { - struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct sahara_ctx *ctx = crypto_ahash_ctx(ahash); struct sahara_sha_reqctx *rctx = ahash_request_ctx(req); - memcpy(out, ctx, sizeof(struct sahara_ctx)); - memcpy(out + sizeof(struct sahara_sha_reqctx), rctx, - sizeof(struct sahara_sha_reqctx)); + memcpy(out, rctx, sizeof(struct sahara_sha_reqctx)); return 0; } static int sahara_sha_import(struct ahash_request *req, const void *in) { - struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct sahara_ctx *ctx = crypto_ahash_ctx(ahash); struct sahara_sha_reqctx *rctx = ahash_request_ctx(req); - memcpy(ctx, in, sizeof(struct sahara_ctx)); - memcpy(rctx, in + sizeof(struct sahara_sha_reqctx), - sizeof(struct sahara_sha_reqctx)); + memcpy(rctx, in, sizeof(struct sahara_sha_reqctx)); return 0; } @@ -1272,6 +1259,7 @@ static struct ahash_alg sha_v3_algs[] = { .export = sahara_sha_export, .import = sahara_sha_import, .halg.digestsize = SHA1_DIGEST_SIZE, + .halg.statesize = sizeof(struct sahara_sha_reqctx), .halg.base = { .cra_name = "sha1", .cra_driver_name = "sahara-sha1", @@ -1299,6 +1287,7 @@ static struct ahash_alg sha_v4_algs[] = { .export = sahara_sha_export, .import = sahara_sha_import, .halg.digestsize = SHA256_DIGEST_SIZE, + .halg.statesize = sizeof(struct sahara_sha_reqctx), .halg.base = { .cra_name = "sha256", .cra_driver_name = "sahara-sha256", diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c index a19ee127edca..7be3fbcd8d78 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c @@ -251,11 +251,10 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq) spaces = readl(ss->base + SS_FCSR); rx_cnt = SS_RXFIFO_SPACES(spaces); tx_cnt = SS_TXFIFO_SPACES(spaces); - dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u %u\n", + dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n", mode, oi, mi.length, ileft, areq->nbytes, rx_cnt, - oo, mo.length, oleft, areq->nbytes, tx_cnt, - todo, ob); + oo, mo.length, oleft, areq->nbytes, tx_cnt, ob); if (tx_cnt == 0) continue; diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index 4c243c1ffc7f..790f7cadc1ed 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ 
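/*
 * The sahara change above encodes the export/import contract: the blob
 * may only carry what .halg.statesize advertises, and it must be
 * per-request state (the reqctx), never the shared tfm context, so that
 * concurrent requests on one tfm cannot clobber each other (sketch;
 * struct my_sha_reqctx hypothetical):
 */
struct my_sha_reqctx {
	u8 context[64];
	u64 count;
};

static int my_sha_export(struct ahash_request *req, void *out)
{
	memcpy(out, ahash_request_ctx(req), sizeof(struct my_sha_reqctx));
	return 0;
}

static int my_sha_import(struct ahash_request *req, const void *in)
{
	memcpy(ahash_request_ctx(req), in, sizeof(struct my_sha_reqctx));
	return 0;
}
/* paired with:  .halg.statesize = sizeof(struct my_sha_reqctx)  */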
-1440,9 +1440,9 @@ static int ux500_cryp_probe(struct platform_device *pdev) device_data->phybase = res->start; device_data->base = devm_ioremap_resource(dev, res); - if (!device_data->base) { + if (IS_ERR(device_data->base)) { dev_err(dev, "[%s]: ioremap failed!", __func__); - ret = -ENOMEM; + ret = PTR_ERR(device_data->base); goto out; } diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index d6fdc583ce5d..574e87c7f2b8 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -1659,9 +1659,9 @@ static int ux500_hash_probe(struct platform_device *pdev) device_data->phybase = res->start; device_data->base = devm_ioremap_resource(dev, res); - if (!device_data->base) { + if (IS_ERR(device_data->base)) { dev_err(dev, "%s: ioremap() failed!\n", __func__); - ret = -ENOMEM; + ret = PTR_ERR(device_data->base); goto out; } spin_lock_init(&device_data->ctx_lock); diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 5c934b670154..4f3cb3554944 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -28,6 +28,7 @@ #include <crypto/hash.h> #include <crypto/md5.h> #include <crypto/algapi.h> +#include <crypto/skcipher.h> #include <linux/device-mapper.h> @@ -44,7 +45,7 @@ struct convert_context { struct bvec_iter iter_out; sector_t cc_sector; atomic_t cc_pending; - struct ablkcipher_request *req; + struct skcipher_request *req; }; /* @@ -86,7 +87,7 @@ struct crypt_iv_operations { }; struct iv_essiv_private { - struct crypto_hash *hash_tfm; + struct crypto_ahash *hash_tfm; u8 *salt; }; @@ -153,13 +154,13 @@ struct crypt_config { /* ESSIV: struct crypto_cipher *essiv_tfm */ void *iv_private; - struct crypto_ablkcipher **tfms; + struct crypto_skcipher **tfms; unsigned tfms_count; /* * Layout of each crypto request: * - * struct ablkcipher_request + * struct skcipher_request * context * padding * struct dm_crypt_request @@ -189,7 +190,7 @@ static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq); /* * Use this to access cipher attributes that are the same for each CPU. 
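/*
 * The two ux500 fixes above share one rule: devm_ioremap_resource()
 * reports failure with ERR_PTR(), never NULL, and it already logs the
 * reason itself, so the caller only needs IS_ERR()/PTR_ERR() (sketch):
 */
static int my_map_regs(struct platform_device *pdev, void __iomem **base)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	*base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(*base))
		return PTR_ERR(*base);
	return 0;
}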
*/ -static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc) +static struct crypto_skcipher *any_tfm(struct crypt_config *cc) { return cc->tfms[0]; } @@ -263,23 +264,25 @@ static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv, static int crypt_iv_essiv_init(struct crypt_config *cc) { struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; - struct hash_desc desc; + AHASH_REQUEST_ON_STACK(req, essiv->hash_tfm); struct scatterlist sg; struct crypto_cipher *essiv_tfm; int err; sg_init_one(&sg, cc->key, cc->key_size); - desc.tfm = essiv->hash_tfm; - desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_tfm(req, essiv->hash_tfm); + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); + ahash_request_set_crypt(req, &sg, essiv->salt, cc->key_size); - err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt); + err = crypto_ahash_digest(req); + ahash_request_zero(req); if (err) return err; essiv_tfm = cc->iv_private; err = crypto_cipher_setkey(essiv_tfm, essiv->salt, - crypto_hash_digestsize(essiv->hash_tfm)); + crypto_ahash_digestsize(essiv->hash_tfm)); if (err) return err; @@ -290,7 +293,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc) static int crypt_iv_essiv_wipe(struct crypt_config *cc) { struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; - unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm); + unsigned salt_size = crypto_ahash_digestsize(essiv->hash_tfm); struct crypto_cipher *essiv_tfm; int r, err = 0; @@ -320,7 +323,7 @@ static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc, } if (crypto_cipher_blocksize(essiv_tfm) != - crypto_ablkcipher_ivsize(any_tfm(cc))) { + crypto_skcipher_ivsize(any_tfm(cc))) { ti->error = "Block size of ESSIV cipher does " "not match IV size of block cipher"; crypto_free_cipher(essiv_tfm); @@ -342,7 +345,7 @@ static void crypt_iv_essiv_dtr(struct crypt_config *cc) struct crypto_cipher *essiv_tfm; struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; - crypto_free_hash(essiv->hash_tfm); + crypto_free_ahash(essiv->hash_tfm); essiv->hash_tfm = NULL; kzfree(essiv->salt); @@ -360,7 +363,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, const char *opts) { struct crypto_cipher *essiv_tfm = NULL; - struct crypto_hash *hash_tfm = NULL; + struct crypto_ahash *hash_tfm = NULL; u8 *salt = NULL; int err; @@ -370,14 +373,14 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, } /* Allocate hash algorithm */ - hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC); + hash_tfm = crypto_alloc_ahash(opts, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(hash_tfm)) { ti->error = "Error initializing ESSIV hash"; err = PTR_ERR(hash_tfm); goto bad; } - salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL); + salt = kzalloc(crypto_ahash_digestsize(hash_tfm), GFP_KERNEL); if (!salt) { ti->error = "Error kmallocing salt storage in ESSIV"; err = -ENOMEM; @@ -388,7 +391,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, cc->iv_gen_private.essiv.hash_tfm = hash_tfm; essiv_tfm = setup_essiv_cpu(cc, ti, salt, - crypto_hash_digestsize(hash_tfm)); + crypto_ahash_digestsize(hash_tfm)); if (IS_ERR(essiv_tfm)) { crypt_iv_essiv_dtr(cc); return PTR_ERR(essiv_tfm); @@ -399,7 +402,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, bad: if (hash_tfm && !IS_ERR(hash_tfm)) - crypto_free_hash(hash_tfm); + crypto_free_ahash(hash_tfm); kfree(salt); return err; } @@ -419,7 +422,7 @@ static int 
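/*
 * The essiv conversion beginning above is the one-shot on-stack ahash
 * pattern for a tfm allocated with CRYPTO_ALG_ASYNC in the mask (i.e.
 * guaranteed synchronous), as crypt_iv_essiv_ctr() does. Sketch; buf
 * must be linear, non-stack memory since it goes into a scatterlist:
 */
#include <crypto/hash.h>

static int my_digest_buf(struct crypto_ahash *tfm, const u8 *buf,
			 unsigned int len, u8 *out)
{
	AHASH_REQUEST_ON_STACK(req, tfm);
	struct scatterlist sg;
	int err;

	sg_init_one(&sg, buf, len);
	ahash_request_set_tfm(req, tfm);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	ahash_request_set_crypt(req, &sg, out, len);
	err = crypto_ahash_digest(req);
	ahash_request_zero(req);	/* scrub key-derived state off the stack */
	return err;
}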
crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, const char *opts) { - unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc)); + unsigned bs = crypto_skcipher_blocksize(any_tfm(cc)); int log = ilog2(bs); /* we need to calculate how far we must shift the sector count @@ -816,27 +819,27 @@ static void crypt_convert_init(struct crypt_config *cc, } static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc, - struct ablkcipher_request *req) + struct skcipher_request *req) { return (struct dm_crypt_request *)((char *)req + cc->dmreq_start); } -static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc, +static struct skcipher_request *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq) { - return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start); + return (struct skcipher_request *)((char *)dmreq - cc->dmreq_start); } static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq) { return (u8 *)ALIGN((unsigned long)(dmreq + 1), - crypto_ablkcipher_alignmask(any_tfm(cc)) + 1); + crypto_skcipher_alignmask(any_tfm(cc)) + 1); } static int crypt_convert_block(struct crypt_config *cc, struct convert_context *ctx, - struct ablkcipher_request *req) + struct skcipher_request *req) { struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in); struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out); @@ -866,13 +869,13 @@ static int crypt_convert_block(struct crypt_config *cc, return r; } - ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out, - 1 << SECTOR_SHIFT, iv); + skcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out, + 1 << SECTOR_SHIFT, iv); if (bio_data_dir(ctx->bio_in) == WRITE) - r = crypto_ablkcipher_encrypt(req); + r = crypto_skcipher_encrypt(req); else - r = crypto_ablkcipher_decrypt(req); + r = crypto_skcipher_decrypt(req); if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post) r = cc->iv_gen_ops->post(cc, iv, dmreq); @@ -891,23 +894,23 @@ static void crypt_alloc_req(struct crypt_config *cc, if (!ctx->req) ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO); - ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]); + skcipher_request_set_tfm(ctx->req, cc->tfms[key_index]); /* * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs * requests if driver request queue is full. 
*/ - ablkcipher_request_set_callback(ctx->req, + skcipher_request_set_callback(ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, kcryptd_async_done, dmreq_of_req(cc, ctx->req)); } static void crypt_free_req(struct crypt_config *cc, - struct ablkcipher_request *req, struct bio *base_bio) + struct skcipher_request *req, struct bio *base_bio) { struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size); - if ((struct ablkcipher_request *)(io + 1) != req) + if ((struct skcipher_request *)(io + 1) != req) mempool_free(req, cc->req_pool); } @@ -1437,7 +1440,7 @@ static void crypt_free_tfms(struct crypt_config *cc) for (i = 0; i < cc->tfms_count; i++) if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) { - crypto_free_ablkcipher(cc->tfms[i]); + crypto_free_skcipher(cc->tfms[i]); cc->tfms[i] = NULL; } @@ -1450,13 +1453,13 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode) unsigned i; int err; - cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *), + cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_skcipher *), GFP_KERNEL); if (!cc->tfms) return -ENOMEM; for (i = 0; i < cc->tfms_count; i++) { - cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0); + cc->tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0); if (IS_ERR(cc->tfms[i])) { err = PTR_ERR(cc->tfms[i]); crypt_free_tfms(cc); @@ -1476,9 +1479,9 @@ static int crypt_setkey_allcpus(struct crypt_config *cc) subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count); for (i = 0; i < cc->tfms_count; i++) { - r = crypto_ablkcipher_setkey(cc->tfms[i], - cc->key + (i * subkey_size), - subkey_size); + r = crypto_skcipher_setkey(cc->tfms[i], + cc->key + (i * subkey_size), + subkey_size); if (r) err = r; } @@ -1645,7 +1648,7 @@ static int crypt_ctr_cipher(struct dm_target *ti, } /* Initialize IV */ - cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc)); + cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc)); if (cc->iv_size) /* at least a 64 bit sector number should fit in our buffer */ cc->iv_size = max(cc->iv_size, @@ -1763,21 +1766,21 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) if (ret < 0) goto bad; - cc->dmreq_start = sizeof(struct ablkcipher_request); - cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc)); + cc->dmreq_start = sizeof(struct skcipher_request); + cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc)); cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request)); - if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) { + if (crypto_skcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) { /* Allocate the padding exactly */ iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request)) - & crypto_ablkcipher_alignmask(any_tfm(cc)); + & crypto_skcipher_alignmask(any_tfm(cc)); } else { /* * If the cipher requires greater alignment than kmalloc * alignment, we don't know the exact position of the * initialization vector. We must assume worst case. 
*/ - iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc)); + iv_size_padding = crypto_skcipher_alignmask(any_tfm(cc)); } ret = -ENOMEM; @@ -1922,7 +1925,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) io = dm_per_bio_data(bio, cc->per_bio_data_size); crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); - io->ctx.req = (struct ablkcipher_request *)(io + 1); + io->ctx.req = (struct skcipher_request *)(io + 1); if (bio_data_dir(io->base_bio) == READ) { if (kcryptd_io_read(io, GFP_NOWAIT)) diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c index 05005c660d4d..f60f7660b451 100644 --- a/drivers/net/ppp/ppp_mppe.c +++ b/drivers/net/ppp/ppp_mppe.c @@ -42,6 +42,8 @@ * deprecated in 2.6 */ +#include <crypto/hash.h> +#include <crypto/skcipher.h> #include <linux/err.h> #include <linux/module.h> #include <linux/kernel.h> @@ -49,7 +51,6 @@ #include <linux/types.h> #include <linux/slab.h> #include <linux/string.h> -#include <linux/crypto.h> #include <linux/mm.h> #include <linux/ppp_defs.h> #include <linux/ppp-comp.h> @@ -94,8 +95,8 @@ static inline void sha_pad_init(struct sha_pad *shapad) * State for an MPPE (de)compressor. */ struct ppp_mppe_state { - struct crypto_blkcipher *arc4; - struct crypto_hash *sha1; + struct crypto_skcipher *arc4; + struct crypto_ahash *sha1; unsigned char *sha1_digest; unsigned char master_key[MPPE_MAX_KEY_LEN]; unsigned char session_key[MPPE_MAX_KEY_LEN]; @@ -135,7 +136,7 @@ struct ppp_mppe_state { */ static void get_new_key_from_sha(struct ppp_mppe_state * state) { - struct hash_desc desc; + AHASH_REQUEST_ON_STACK(req, state->sha1); struct scatterlist sg[4]; unsigned int nbytes; @@ -148,10 +149,12 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state) nbytes += setup_sg(&sg[3], sha_pad->sha_pad2, sizeof(sha_pad->sha_pad2)); - desc.tfm = state->sha1; - desc.flags = 0; + ahash_request_set_tfm(req, state->sha1); + ahash_request_set_callback(req, 0, NULL, NULL); + ahash_request_set_crypt(req, sg, state->sha1_digest, nbytes); - crypto_hash_digest(&desc, sg, nbytes, state->sha1_digest); + crypto_ahash_digest(req); + ahash_request_zero(req); } /* @@ -161,20 +164,23 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state) static void mppe_rekey(struct ppp_mppe_state * state, int initial_key) { struct scatterlist sg_in[1], sg_out[1]; - struct blkcipher_desc desc = { .tfm = state->arc4 }; + SKCIPHER_REQUEST_ON_STACK(req, state->arc4); + + skcipher_request_set_tfm(req, state->arc4); + skcipher_request_set_callback(req, 0, NULL, NULL); get_new_key_from_sha(state); if (!initial_key) { - crypto_blkcipher_setkey(state->arc4, state->sha1_digest, - state->keylen); + crypto_skcipher_setkey(state->arc4, state->sha1_digest, + state->keylen); sg_init_table(sg_in, 1); sg_init_table(sg_out, 1); setup_sg(sg_in, state->sha1_digest, state->keylen); setup_sg(sg_out, state->session_key, state->keylen); - if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, - state->keylen) != 0) { + skcipher_request_set_crypt(req, sg_in, sg_out, state->keylen, + NULL); + if (crypto_skcipher_encrypt(req)) printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n"); - } } else { memcpy(state->session_key, state->sha1_digest, state->keylen); } @@ -184,7 +190,8 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key) state->session_key[1] = 0x26; state->session_key[2] = 0x9e; } - crypto_blkcipher_setkey(state->arc4, state->session_key, state->keylen); + crypto_skcipher_setkey(state->arc4, state->session_key, state->keylen); 
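/*
 * The mppe_rekey() conversion ending above, reduced to its skeleton: a
 * stack request for a synchronous skcipher, one encrypt, then
 * zeroization (sketch; arc4 in ECB mode takes no IV):
 */
#include <crypto/skcipher.h>

static int my_arc4_once(struct crypto_skcipher *tfm,
			struct scatterlist *in, struct scatterlist *out,
			unsigned int len)
{
	SKCIPHER_REQUEST_ON_STACK(req, tfm);
	int err;

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, in, out, len, NULL);
	err = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);
	return err;
}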
+ skcipher_request_zero(req); } /* @@ -204,19 +211,19 @@ static void *mppe_alloc(unsigned char *options, int optlen) goto out; - state->arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); + state->arc4 = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(state->arc4)) { state->arc4 = NULL; goto out_free; } - state->sha1 = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC); + state->sha1 = crypto_alloc_ahash("sha1", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(state->sha1)) { state->sha1 = NULL; goto out_free; } - digestsize = crypto_hash_digestsize(state->sha1); + digestsize = crypto_ahash_digestsize(state->sha1); if (digestsize < MPPE_MAX_KEY_LEN) goto out_free; @@ -237,15 +244,12 @@ static void *mppe_alloc(unsigned char *options, int optlen) return (void *)state; - out_free: - if (state->sha1_digest) - kfree(state->sha1_digest); - if (state->sha1) - crypto_free_hash(state->sha1); - if (state->arc4) - crypto_free_blkcipher(state->arc4); - kfree(state); - out: +out_free: + kfree(state->sha1_digest); + crypto_free_ahash(state->sha1); + crypto_free_skcipher(state->arc4); + kfree(state); +out: return NULL; } @@ -256,13 +260,10 @@ static void mppe_free(void *arg) { struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; if (state) { - if (state->sha1_digest) kfree(state->sha1_digest); - if (state->sha1) - crypto_free_hash(state->sha1); - if (state->arc4) - crypto_free_blkcipher(state->arc4); - kfree(state); + crypto_free_ahash(state->sha1); + crypto_free_skcipher(state->arc4); + kfree(state); } } @@ -368,8 +369,9 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf, int isize, int osize) { struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; - struct blkcipher_desc desc = { .tfm = state->arc4 }; + SKCIPHER_REQUEST_ON_STACK(req, state->arc4); int proto; + int err; struct scatterlist sg_in[1], sg_out[1]; /* @@ -426,7 +428,13 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf, sg_init_table(sg_out, 1); setup_sg(sg_in, ibuf, isize); setup_sg(sg_out, obuf, osize); - if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, isize) != 0) { + + skcipher_request_set_tfm(req, state->arc4); + skcipher_request_set_callback(req, 0, NULL, NULL); + skcipher_request_set_crypt(req, sg_in, sg_out, isize, NULL); + err = crypto_skcipher_encrypt(req); + skcipher_request_zero(req); + if (err) { printk(KERN_DEBUG "crypto_cypher_encrypt failed\n"); return -1; } @@ -475,7 +483,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, int osize) { struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; - struct blkcipher_desc desc = { .tfm = state->arc4 }; + SKCIPHER_REQUEST_ON_STACK(req, state->arc4); unsigned ccount; int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED; struct scatterlist sg_in[1], sg_out[1]; @@ -609,9 +617,14 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, sg_init_table(sg_out, 1); setup_sg(sg_in, ibuf, 1); setup_sg(sg_out, obuf, 1); - if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, 1) != 0) { + + skcipher_request_set_tfm(req, state->arc4); + skcipher_request_set_callback(req, 0, NULL, NULL); + skcipher_request_set_crypt(req, sg_in, sg_out, 1, NULL); + if (crypto_skcipher_decrypt(req)) { printk(KERN_DEBUG "crypto_cypher_decrypt failed\n"); - return DECOMP_ERROR; + osize = DECOMP_ERROR; + goto out_zap_req; } /* @@ -629,9 +642,11 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, /* And finally, decrypt the rest of the packet. 
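/*
 * The mppe_alloc()/mppe_free() simplification above relies on the free
 * routines tolerating NULL: kfree(NULL) is a no-op, and the patch treats
 * crypto_free_ahash()/crypto_free_skcipher() the same way, so one
 * unconditional cleanup path serves every failure point (sketch;
 * struct my_state hypothetical):
 */
struct my_state {
	struct crypto_skcipher *arc4;
	struct crypto_ahash *sha1;
	u8 *digest;
};

static void my_free_state(struct my_state *s)
{
	if (!s)
		return;
	kfree(s->digest);		/* NULL-safe */
	crypto_free_ahash(s->sha1);	/* NULL-tolerant, as used above */
	crypto_free_skcipher(s->arc4);
	kfree(s);
}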
*/ setup_sg(sg_in, ibuf + 1, isize - 1); setup_sg(sg_out, obuf + 1, osize - 1); - if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, isize - 1)) { + skcipher_request_set_crypt(req, sg_in, sg_out, isize - 1, NULL); + if (crypto_skcipher_decrypt(req)) { printk(KERN_DEBUG "crypto_cypher_decrypt failed\n"); - return DECOMP_ERROR; + osize = DECOMP_ERROR; + goto out_zap_req; } state->stats.unc_bytes += osize; @@ -642,6 +657,8 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, /* good packet credit */ state->sanity_errors >>= 1; +out_zap_req: + skcipher_request_zero(req); return osize; sanity_error: @@ -714,8 +731,8 @@ static struct compressor ppp_mppe = { static int __init ppp_mppe_init(void) { int answer; - if (!(crypto_has_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) && - crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC))) + if (!(crypto_has_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) && + crypto_has_ahash("sha1", 0, CRYPTO_ALG_ASYNC))) return -ENODEV; sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL); diff --git a/drivers/net/wireless/intersil/orinoco/mic.c b/drivers/net/wireless/intersil/orinoco/mic.c index fce4a843e656..bc7397d709d3 100644 --- a/drivers/net/wireless/intersil/orinoco/mic.c +++ b/drivers/net/wireless/intersil/orinoco/mic.c @@ -6,7 +6,7 @@ #include <linux/string.h> #include <linux/if_ether.h> #include <linux/scatterlist.h> -#include <linux/crypto.h> +#include <crypto/hash.h> #include "orinoco.h" #include "mic.h" @@ -16,7 +16,8 @@ /********************************************************************/ int orinoco_mic_init(struct orinoco_private *priv) { - priv->tx_tfm_mic = crypto_alloc_hash("michael_mic", 0, 0); + priv->tx_tfm_mic = crypto_alloc_ahash("michael_mic", 0, + CRYPTO_ALG_ASYNC); if (IS_ERR(priv->tx_tfm_mic)) { printk(KERN_DEBUG "orinoco_mic_init: could not allocate " "crypto API michael_mic\n"); @@ -24,7 +25,8 @@ int orinoco_mic_init(struct orinoco_private *priv) return -ENOMEM; } - priv->rx_tfm_mic = crypto_alloc_hash("michael_mic", 0, 0); + priv->rx_tfm_mic = crypto_alloc_ahash("michael_mic", 0, + CRYPTO_ALG_ASYNC); if (IS_ERR(priv->rx_tfm_mic)) { printk(KERN_DEBUG "orinoco_mic_init: could not allocate " "crypto API michael_mic\n"); @@ -38,18 +40,19 @@ int orinoco_mic_init(struct orinoco_private *priv) void orinoco_mic_free(struct orinoco_private *priv) { if (priv->tx_tfm_mic) - crypto_free_hash(priv->tx_tfm_mic); + crypto_free_ahash(priv->tx_tfm_mic); if (priv->rx_tfm_mic) - crypto_free_hash(priv->rx_tfm_mic); + crypto_free_ahash(priv->rx_tfm_mic); } -int orinoco_mic(struct crypto_hash *tfm_michael, u8 *key, +int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key, u8 *da, u8 *sa, u8 priority, u8 *data, size_t data_len, u8 *mic) { - struct hash_desc desc; + AHASH_REQUEST_ON_STACK(req, tfm_michael); struct scatterlist sg[2]; u8 hdr[ETH_HLEN + 2]; /* size of header + padding */ + int err; if (tfm_michael == NULL) { printk(KERN_WARNING "orinoco_mic: tfm_michael == NULL\n"); @@ -69,11 +72,13 @@ int orinoco_mic(struct crypto_hash *tfm_michael, u8 *key, sg_set_buf(&sg[0], hdr, sizeof(hdr)); sg_set_buf(&sg[1], data, data_len); - if (crypto_hash_setkey(tfm_michael, key, MIC_KEYLEN)) + if (crypto_ahash_setkey(tfm_michael, key, MIC_KEYLEN)) return -1; - desc.tfm = tfm_michael; - desc.flags = 0; - return crypto_hash_digest(&desc, sg, data_len + sizeof(hdr), - mic); + ahash_request_set_tfm(req, tfm_michael); + ahash_request_set_callback(req, 0, NULL, NULL); + ahash_request_set_crypt(req, sg, mic, data_len + sizeof(hdr)); + err = 
crypto_ahash_digest(req); + ahash_request_zero(req); + return err; } diff --git a/drivers/net/wireless/intersil/orinoco/mic.h b/drivers/net/wireless/intersil/orinoco/mic.h index 04d05bc566d6..ce731d05cc98 100644 --- a/drivers/net/wireless/intersil/orinoco/mic.h +++ b/drivers/net/wireless/intersil/orinoco/mic.h @@ -11,11 +11,11 @@ /* Forward declarations */ struct orinoco_private; -struct crypto_hash; +struct crypto_ahash; int orinoco_mic_init(struct orinoco_private *priv); void orinoco_mic_free(struct orinoco_private *priv); -int orinoco_mic(struct crypto_hash *tfm_michael, u8 *key, +int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key, u8 *da, u8 *sa, u8 priority, u8 *data, size_t data_len, u8 *mic); diff --git a/drivers/net/wireless/intersil/orinoco/orinoco.h b/drivers/net/wireless/intersil/orinoco/orinoco.h index eebd2be21ee9..2f0c84b1c440 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco.h +++ b/drivers/net/wireless/intersil/orinoco/orinoco.h @@ -152,8 +152,8 @@ struct orinoco_private { u8 *wpa_ie; int wpa_ie_len; - struct crypto_hash *rx_tfm_mic; - struct crypto_hash *tx_tfm_mic; + struct crypto_ahash *rx_tfm_mic; + struct crypto_ahash *tx_tfm_mic; unsigned int wpa_enabled:1; unsigned int tkip_cm_active:1; diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c index 64a90252c57f..5f97da1947e3 100644 --- a/drivers/nfc/s3fwrn5/firmware.c +++ b/drivers/nfc/s3fwrn5/firmware.c @@ -19,7 +19,7 @@ #include <linux/completion.h> #include <linux/firmware.h> -#include <linux/crypto.h> +#include <crypto/hash.h> #include <crypto/sha.h> #include "s3fwrn5.h" @@ -429,8 +429,7 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info) { struct s3fwrn5_fw_image *fw = &fw_info->fw; u8 hash_data[SHA1_DIGEST_SIZE]; - struct scatterlist sg; - struct hash_desc desc; + struct crypto_shash *tfm; u32 image_size, off; int ret; @@ -438,12 +437,31 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info) /* Compute SHA of firmware data */ - sg_init_one(&sg, fw->image, image_size); - desc.tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC); - crypto_hash_init(&desc); - crypto_hash_update(&desc, &sg, image_size); - crypto_hash_final(&desc, hash_data); - crypto_free_hash(desc.tfm); + tfm = crypto_alloc_shash("sha1", 0, 0); + if (IS_ERR(tfm)) { + ret = PTR_ERR(tfm); + dev_err(&fw_info->ndev->nfc_dev->dev, + "Cannot allocate shash (code=%d)\n", ret); + goto out; + } + + { + SHASH_DESC_ON_STACK(desc, tfm); + + desc->tfm = tfm; + desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + ret = crypto_shash_digest(desc, fw->image, image_size, + hash_data); + shash_desc_zero(desc); + } + + crypto_free_shash(tfm); + if (ret) { + dev_err(&fw_info->ndev->nfc_dev->dev, + "Cannot compute hash (code=%d)\n", ret); + goto out; + } /* Firmware update process */ diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 0b8af186e707..2e4c82f8329c 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -26,12 +26,12 @@ * Zhenyu Wang */ +#include <crypto/hash.h> #include <linux/types.h> #include <linux/inet.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/blkdev.h> -#include <linux/crypto.h> #include <linux/delay.h> #include <linux/kfifo.h> #include <linux/scatterlist.h> @@ -428,7 +428,7 @@ static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, * sufficient room. 
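/*
 * The s3fwrn5 rewrite above is the canonical synchronous-hash recipe
 * when the data is already linear: allocate a shash tfm, run a one-shot
 * digest through a stack descriptor, scrub the descriptor, free the tfm
 * (sketch):
 */
#include <crypto/hash.h>
#include <crypto/sha.h>

static int my_sha1(const u8 *data, unsigned int len,
		   u8 out[SHA1_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int ret;

	tfm = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
		ret = crypto_shash_digest(desc, data, len, out);
		shash_desc_zero(desc);
	}

	crypto_free_shash(tfm);
	return ret;
}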
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
index 64a90252c57f..5f97da1947e3 100644
--- a/drivers/nfc/s3fwrn5/firmware.c
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -19,7 +19,7 @@
 #include <linux/completion.h>
 #include <linux/firmware.h>
-#include <linux/crypto.h>
+#include <crypto/hash.h>
 #include <crypto/sha.h>

 #include "s3fwrn5.h"
@@ -429,8 +429,7 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
 {
 	struct s3fwrn5_fw_image *fw = &fw_info->fw;
 	u8 hash_data[SHA1_DIGEST_SIZE];
-	struct scatterlist sg;
-	struct hash_desc desc;
+	struct crypto_shash *tfm;
 	u32 image_size, off;
 	int ret;

@@ -438,12 +437,31 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)

 	/* Compute SHA of firmware data */

-	sg_init_one(&sg, fw->image, image_size);
-	desc.tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
-	crypto_hash_init(&desc);
-	crypto_hash_update(&desc, &sg, image_size);
-	crypto_hash_final(&desc, hash_data);
-	crypto_free_hash(desc.tfm);
+	tfm = crypto_alloc_shash("sha1", 0, 0);
+	if (IS_ERR(tfm)) {
+		ret = PTR_ERR(tfm);
+		dev_err(&fw_info->ndev->nfc_dev->dev,
+			"Cannot allocate shash (code=%d)\n", ret);
+		goto out;
+	}
+
+	{
+		SHASH_DESC_ON_STACK(desc, tfm);
+
+		desc->tfm = tfm;
+		desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+		ret = crypto_shash_digest(desc, fw->image, image_size,
+					  hash_data);
+		shash_desc_zero(desc);
+	}
+
+	crypto_free_shash(tfm);
+	if (ret) {
+		dev_err(&fw_info->ndev->nfc_dev->dev,
+			"Cannot compute hash (code=%d)\n", ret);
+		goto out;
+	}

 	/* Firmware update process */
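The s3fwrn5 hunk drops scatterlists entirely by moving to shash, the synchronous hash interface that accepts linear buffers. A sketch of that shape, assuming a SHA-1 digest over a single buffer (SHA1_DIGEST_SIZE comes from <crypto/sha.h>; as of this kernel the descriptor still carries a flags field):

#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>

static int example_sha1(const void *buf, unsigned int len,
			u8 out[SHA1_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int ret;

	tfm = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);	/* sized for this tfm */

		desc->tfm = tfm;
		desc->flags = 0;
		ret = crypto_shash_digest(desc, buf, len, out);
		shash_desc_zero(desc);	/* wipe intermediate state */
	}

	crypto_free_shash(tfm);
	return ret;
}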
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 0b8af186e707..2e4c82f8329c 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -26,12 +26,12 @@
  *	Zhenyu Wang
  */

+#include <crypto/hash.h>
 #include <linux/types.h>
 #include <linux/inet.h>
 #include <linux/slab.h>
 #include <linux/file.h>
 #include <linux/blkdev.h>
-#include <linux/crypto.h>
 #include <linux/delay.h>
 #include <linux/kfifo.h>
 #include <linux/scatterlist.h>
@@ -428,7 +428,7 @@ static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr,
 	 * sufficient room.
 	 */
 	if (conn->hdrdgst_en) {
-		iscsi_tcp_dgst_header(&tcp_sw_conn->tx_hash, hdr, hdrlen,
+		iscsi_tcp_dgst_header(tcp_sw_conn->tx_hash, hdr, hdrlen,
 				      hdr + hdrlen);
 		hdrlen += ISCSI_DIGEST_SIZE;
 	}
@@ -454,7 +454,7 @@ iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
 {
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
 	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
-	struct hash_desc *tx_hash = NULL;
+	struct ahash_request *tx_hash = NULL;
 	unsigned int hdr_spec_len;

 	ISCSI_SW_TCP_DBG(conn, "offset=%d, datalen=%d %s\n", offset, len,
@@ -467,7 +467,7 @@ iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
 	WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));

 	if (conn->datadgst_en)
-		tx_hash = &tcp_sw_conn->tx_hash;
+		tx_hash = tcp_sw_conn->tx_hash;

 	return iscsi_segment_seek_sg(&tcp_sw_conn->out.data_segment,
 				     sg, count, offset, len,
@@ -480,7 +480,7 @@ iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
 {
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
 	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
-	struct hash_desc *tx_hash = NULL;
+	struct ahash_request *tx_hash = NULL;
 	unsigned int hdr_spec_len;

 	ISCSI_SW_TCP_DBG(conn, "datalen=%zd %s\n", len, conn->datadgst_en ?
@@ -492,7 +492,7 @@ iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
 	WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));

 	if (conn->datadgst_en)
-		tx_hash = &tcp_sw_conn->tx_hash;
+		tx_hash = tcp_sw_conn->tx_hash;

 	iscsi_segment_init_linear(&tcp_sw_conn->out.data_segment,
 				  data, len, NULL, tx_hash);
@@ -543,6 +543,7 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
 	struct iscsi_cls_conn *cls_conn;
 	struct iscsi_tcp_conn *tcp_conn;
 	struct iscsi_sw_tcp_conn *tcp_sw_conn;
+	struct crypto_ahash *tfm;

 	cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*tcp_sw_conn),
 					conn_idx);
@@ -552,23 +553,28 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
 	tcp_conn = conn->dd_data;
 	tcp_sw_conn = tcp_conn->dd_data;

-	tcp_sw_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
-						     CRYPTO_ALG_ASYNC);
-	tcp_sw_conn->tx_hash.flags = 0;
-	if (IS_ERR(tcp_sw_conn->tx_hash.tfm))
+	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm))
 		goto free_conn;

-	tcp_sw_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
-						     CRYPTO_ALG_ASYNC);
-	tcp_sw_conn->rx_hash.flags = 0;
-	if (IS_ERR(tcp_sw_conn->rx_hash.tfm))
-		goto free_tx_tfm;
-	tcp_conn->rx_hash = &tcp_sw_conn->rx_hash;
+	tcp_sw_conn->tx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
+	if (!tcp_sw_conn->tx_hash)
+		goto free_tfm;
+	ahash_request_set_callback(tcp_sw_conn->tx_hash, 0, NULL, NULL);
+
+	tcp_sw_conn->rx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
+	if (!tcp_sw_conn->rx_hash)
+		goto free_tx_hash;
+	ahash_request_set_callback(tcp_sw_conn->rx_hash, 0, NULL, NULL);
+
+	tcp_conn->rx_hash = tcp_sw_conn->rx_hash;

 	return cls_conn;

-free_tx_tfm:
-	crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
+free_tx_hash:
+	ahash_request_free(tcp_sw_conn->tx_hash);
+free_tfm:
+	crypto_free_ahash(tfm);
 free_conn:
 	iscsi_conn_printk(KERN_ERR, conn,
 			  "Could not create connection due to crc32c "
@@ -607,10 +613,14 @@ static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)

 	iscsi_sw_tcp_release_conn(conn);

-	if (tcp_sw_conn->tx_hash.tfm)
-		crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
-	if (tcp_sw_conn->rx_hash.tfm)
-		crypto_free_hash(tcp_sw_conn->rx_hash.tfm);
+	ahash_request_free(tcp_sw_conn->rx_hash);
+	if (tcp_sw_conn->tx_hash) {
+		struct crypto_ahash *tfm;
+
+		tfm = crypto_ahash_reqtfm(tcp_sw_conn->tx_hash);
+		ahash_request_free(tcp_sw_conn->tx_hash);
+		crypto_free_ahash(tfm);
+	}

 	iscsi_tcp_conn_teardown(cls_conn);
 }
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index f42ecb238af5..06d42d00a323 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -45,8 +45,8 @@ struct iscsi_sw_tcp_conn {
 	void			(*old_write_space)(struct sock *);

 	/* data and header digests */
-	struct hash_desc	tx_hash;	/* CRC32C (Tx) */
-	struct hash_desc	rx_hash;	/* CRC32C (Rx) */
+	struct ahash_request	*tx_hash;	/* CRC32C (Tx) */
+	struct ahash_request	*rx_hash;	/* CRC32C (Rx) */

 	/* MIB custom statistics */
 	uint32_t		sendpage_failures_cnt;
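iscsi_tcp now keeps a single crc32c tfm with two long-lived requests hanging off it, and teardown has to recover the tfm from a request via crypto_ahash_reqtfm() before the last free. A sketch of that lifecycle, with hypothetical function names:

#include <crypto/hash.h>
#include <linux/err.h>

static int example_setup(struct ahash_request **rx, struct ahash_request **tx)
{
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	*rx = ahash_request_alloc(tfm, GFP_KERNEL);
	*tx = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!*rx || !*tx) {
		ahash_request_free(*rx);	/* freeing NULL is a no-op */
		ahash_request_free(*tx);
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}
	ahash_request_set_callback(*rx, 0, NULL, NULL);
	ahash_request_set_callback(*tx, 0, NULL, NULL);
	return 0;
}

static void example_teardown(struct ahash_request *rx, struct ahash_request *tx)
{
	/* Both requests share one tfm; fetch it before the last free. */
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(tx);

	ahash_request_free(rx);
	ahash_request_free(tx);
	crypto_free_ahash(tfm);
}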
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 60cb6dc3c6f0..63a1d69ff515 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -26,13 +26,13 @@
  *	Zhenyu Wang
  */

+#include <crypto/hash.h>
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/inet.h>
 #include <linux/slab.h>
 #include <linux/file.h>
 #include <linux/blkdev.h>
-#include <linux/crypto.h>
 #include <linux/delay.h>
 #include <linux/kfifo.h>
 #include <linux/scatterlist.h>
@@ -214,7 +214,8 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
 		} else
 			sg_init_one(&sg, segment->data + segment->copied,
 				    copied);

-		crypto_hash_update(segment->hash, &sg, copied);
+		ahash_request_set_crypt(segment->hash, &sg, NULL, copied);
+		crypto_ahash_update(segment->hash);
 	}

 	segment->copied += copied;
@@ -260,7 +261,9 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
 	 * is completely handled in hdr done function.
 	 */
 	if (segment->hash) {
-		crypto_hash_final(segment->hash, segment->digest);
+		ahash_request_set_crypt(segment->hash, NULL,
+					segment->digest, 0);
+		crypto_ahash_final(segment->hash);
 		iscsi_tcp_segment_splice_digest(segment,
 				recv ? segment->recv_digest : segment->digest);
 		return 0;
@@ -310,13 +313,14 @@ iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
 }

 inline void
-iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen,
-		      unsigned char digest[ISCSI_DIGEST_SIZE])
+iscsi_tcp_dgst_header(struct ahash_request *hash, const void *hdr,
+		      size_t hdrlen, unsigned char digest[ISCSI_DIGEST_SIZE])
 {
 	struct scatterlist sg;

 	sg_init_one(&sg, hdr, hdrlen);
-	crypto_hash_digest(hash, &sg, hdrlen, digest);
+	ahash_request_set_crypt(hash, &sg, digest, hdrlen);
+	crypto_ahash_digest(hash);
 }
 EXPORT_SYMBOL_GPL(iscsi_tcp_dgst_header);

@@ -341,7 +345,7 @@ iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
  */
 static inline void
 __iscsi_segment_init(struct iscsi_segment *segment, size_t size,
-		     iscsi_segment_done_fn_t *done, struct hash_desc *hash)
+		     iscsi_segment_done_fn_t *done, struct ahash_request *hash)
 {
 	memset(segment, 0, sizeof(*segment));
 	segment->total_size = size;
@@ -349,14 +353,14 @@ __iscsi_segment_init(struct iscsi_segment *segment, size_t size,

 	if (hash) {
 		segment->hash = hash;
-		crypto_hash_init(hash);
+		crypto_ahash_init(hash);
 	}
 }

 inline void
 iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
 			  size_t size, iscsi_segment_done_fn_t *done,
-			  struct hash_desc *hash)
+			  struct ahash_request *hash)
 {
 	__iscsi_segment_init(segment, size, done, hash);
 	segment->data = data;
@@ -368,7 +372,8 @@ inline int
 iscsi_segment_seek_sg(struct iscsi_segment *segment,
 		      struct scatterlist *sg_list, unsigned int sg_count,
 		      unsigned int offset, size_t size,
-		      iscsi_segment_done_fn_t *done, struct hash_desc *hash)
+		      iscsi_segment_done_fn_t *done,
+		      struct ahash_request *hash)
 {
 	struct scatterlist *sg;
 	unsigned int i;
@@ -431,7 +436,7 @@ static void
 iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
 {
 	struct iscsi_conn *conn = tcp_conn->iscsi_conn;
-	struct hash_desc *rx_hash = NULL;
+	struct ahash_request *rx_hash = NULL;

 	if (conn->datadgst_en &&
 	    !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
@@ -686,7 +691,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)

 	if (tcp_conn->in.datalen) {
 		struct iscsi_tcp_task *tcp_task = task->dd_data;
-		struct hash_desc *rx_hash = NULL;
+		struct ahash_request *rx_hash = NULL;
 		struct scsi_data_buffer *sdb = scsi_in(task->sc);

 		/*
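libiscsi_tcp drives the digest incrementally, and the hunks above show the one real API quirk of that style: the ahash result pointer travels inside the request, so crypto_ahash_final() takes no output argument. A sketch under the same assumptions (req already allocated and bound to a synchronous tfm, as above):

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static int example_stream_digest(struct ahash_request *req,
				 void *a, unsigned int alen,
				 void *b, unsigned int blen, u8 *out)
{
	struct scatterlist sg;
	int err;

	err = crypto_ahash_init(req);
	if (err)
		return err;

	sg_init_one(&sg, a, alen);
	ahash_request_set_crypt(req, &sg, NULL, alen);	/* no output yet */
	err = crypto_ahash_update(req);
	if (err)
		return err;

	sg_init_one(&sg, b, blen);
	ahash_request_set_crypt(req, &sg, NULL, blen);
	err = crypto_ahash_update(req);
	if (err)
		return err;

	/* The result pointer rides in the request, set it before final(). */
	ahash_request_set_crypt(req, NULL, out, 0);
	return crypto_ahash_final(req);
}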
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c
index 079d50ebfa3a..94c01aad844b 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c
@@ -27,7 +27,7 @@
 * Copyright (c) 2012, Intel Corporation.
 */

-#include <linux/crypto.h>
+#include <crypto/hash.h>
 #include <linux/scatterlist.h>
 #include "../../../include/linux/libcfs/libcfs.h"
 #include "linux-crypto.h"
@@ -38,9 +38,11 @@ static int cfs_crypto_hash_speeds[CFS_HASH_ALG_MAX];

 static int cfs_crypto_hash_alloc(unsigned char alg_id,
 				 const struct cfs_crypto_hash_type **type,
-				 struct hash_desc *desc, unsigned char *key,
+				 struct ahash_request **req,
+				 unsigned char *key,
 				 unsigned int key_len)
 {
+	struct crypto_ahash *tfm;
 	int err = 0;

 	*type = cfs_crypto_hash_type(alg_id);
@@ -50,18 +52,23 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id,
 		       alg_id, CFS_HASH_ALG_MAX);
 		return -EINVAL;
 	}
-	desc->tfm = crypto_alloc_hash((*type)->cht_name, 0, 0);
+	tfm = crypto_alloc_ahash((*type)->cht_name, 0, CRYPTO_ALG_ASYNC);

-	if (desc->tfm == NULL)
-		return -EINVAL;
-
-	if (IS_ERR(desc->tfm)) {
+	if (IS_ERR(tfm)) {
 		CDEBUG(D_INFO, "Failed to alloc crypto hash %s\n",
 		       (*type)->cht_name);
-		return PTR_ERR(desc->tfm);
+		return PTR_ERR(tfm);
 	}

-	desc->flags = 0;
+	*req = ahash_request_alloc(tfm, GFP_KERNEL);
+	if (!*req) {
+		CDEBUG(D_INFO, "Failed to alloc ahash_request for %s\n",
+		       (*type)->cht_name);
+		crypto_free_ahash(tfm);
+		return -ENOMEM;
+	}
+
+	ahash_request_set_callback(*req, 0, NULL, NULL);

 	/** Shash have different logic for initialization then digest
 	 * shash: crypto_hash_setkey, crypto_hash_init
@@ -70,23 +77,27 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id,
 	 * cfs_crypto_hash_alloc.
 	 */
 	if (key != NULL)
-		err = crypto_hash_setkey(desc->tfm, key, key_len);
+		err = crypto_ahash_setkey(tfm, key, key_len);
 	else if ((*type)->cht_key != 0)
-		err = crypto_hash_setkey(desc->tfm,
+		err = crypto_ahash_setkey(tfm,
 					 (unsigned char *)&((*type)->cht_key),
 					 (*type)->cht_size);

 	if (err != 0) {
-		crypto_free_hash(desc->tfm);
+		crypto_free_ahash(tfm);
 		return err;
 	}

 	CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n",
-	       (crypto_hash_tfm(desc->tfm))->__crt_alg->cra_name,
-	       (crypto_hash_tfm(desc->tfm))->__crt_alg->cra_driver_name,
+	       crypto_ahash_alg_name(tfm), crypto_ahash_driver_name(tfm),
 	       cfs_crypto_hash_speeds[alg_id]);

-	return crypto_hash_init(desc);
+	err = crypto_ahash_init(*req);
+	if (err) {
+		ahash_request_free(*req);
+		crypto_free_ahash(tfm);
+	}
+	return err;
 }

 int cfs_crypto_hash_digest(unsigned char alg_id,
@@ -95,27 +106,29 @@ int cfs_crypto_hash_digest(unsigned char alg_id,
 			   unsigned char *hash, unsigned int *hash_len)
 {
 	struct scatterlist	sl;
-	struct hash_desc	hdesc;
+	struct ahash_request *req;
 	int			err;
 	const struct cfs_crypto_hash_type	*type;

 	if (buf == NULL || buf_len == 0 || hash_len == NULL)
 		return -EINVAL;

-	err = cfs_crypto_hash_alloc(alg_id, &type, &hdesc, key, key_len);
+	err = cfs_crypto_hash_alloc(alg_id, &type, &req, key, key_len);
 	if (err != 0)
 		return err;

 	if (hash == NULL || *hash_len < type->cht_size) {
 		*hash_len = type->cht_size;
-		crypto_free_hash(hdesc.tfm);
+		crypto_free_ahash(crypto_ahash_reqtfm(req));
+		ahash_request_free(req);
 		return -ENOSPC;
 	}
 	sg_init_one(&sl, buf, buf_len);

-	hdesc.flags = 0;
-	err = crypto_hash_digest(&hdesc, &sl, sl.length, hash);
-	crypto_free_hash(hdesc.tfm);
+	ahash_request_set_crypt(req, &sl, hash, sl.length);
+	err = crypto_ahash_digest(req);
+	crypto_free_ahash(crypto_ahash_reqtfm(req));
+	ahash_request_free(req);

 	return err;
 }
@@ -125,22 +138,15 @@ struct cfs_crypto_hash_desc *
 	cfs_crypto_hash_init(unsigned char alg_id,
 			     unsigned char *key, unsigned int key_len)
 {
-
-	struct hash_desc *hdesc;
+	struct ahash_request *req;
 	int err;
 	const struct cfs_crypto_hash_type *type;

-	hdesc = kmalloc(sizeof(*hdesc), 0);
-	if (hdesc == NULL)
-		return ERR_PTR(-ENOMEM);
+	err = cfs_crypto_hash_alloc(alg_id, &type, &req, key, key_len);

-	err = cfs_crypto_hash_alloc(alg_id, &type, hdesc, key, key_len);
-
-	if (err) {
-		kfree(hdesc);
+	if (err)
 		return ERR_PTR(err);
-	}
-	return (struct cfs_crypto_hash_desc *)hdesc;
+	return (struct cfs_crypto_hash_desc *)req;
 }
 EXPORT_SYMBOL(cfs_crypto_hash_init);

@@ -148,23 +154,27 @@ int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *hdesc,
 				struct page *page, unsigned int offset,
 				unsigned int len)
 {
+	struct ahash_request *req = (void *)hdesc;
 	struct scatterlist sl;

 	sg_init_table(&sl, 1);
 	sg_set_page(&sl, page, len, offset & ~CFS_PAGE_MASK);

-	return crypto_hash_update((struct hash_desc *)hdesc, &sl, sl.length);
+	ahash_request_set_crypt(req, &sl, NULL, sl.length);
+	return crypto_ahash_update(req);
 }
 EXPORT_SYMBOL(cfs_crypto_hash_update_page);

 int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *hdesc,
 			   const void *buf, unsigned int buf_len)
 {
+	struct ahash_request *req = (void *)hdesc;
 	struct scatterlist sl;

 	sg_init_one(&sl, buf, buf_len);

-	return crypto_hash_update((struct hash_desc *)hdesc, &sl, sl.length);
+	ahash_request_set_crypt(req, &sl, NULL, sl.length);
+	return crypto_ahash_update(req);
 }
 EXPORT_SYMBOL(cfs_crypto_hash_update);

@@ -173,25 +183,27 @@ int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc,
 			  unsigned char *hash, unsigned int *hash_len)
 {
 	int err;
-	int size = crypto_hash_digestsize(((struct hash_desc *)hdesc)->tfm);
+	struct ahash_request *req = (void *)hdesc;
+	int size = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));

 	if (hash_len == NULL) {
-		crypto_free_hash(((struct hash_desc *)hdesc)->tfm);
-		kfree(hdesc);
+		crypto_free_ahash(crypto_ahash_reqtfm(req));
+		ahash_request_free(req);
 		return 0;
 	}
 	if (hash == NULL || *hash_len < size) {
 		*hash_len = size;
 		return -ENOSPC;
 	}
-	err = crypto_hash_final((struct hash_desc *) hdesc, hash);

+	ahash_request_set_crypt(req, NULL, hash, 0);
+	err = crypto_ahash_final(req);
 	if (err < 0) {
 		/* May be caller can fix error */
 		return err;
 	}
-	crypto_free_hash(((struct hash_desc *)hdesc)->tfm);
-	kfree(hdesc);
+	crypto_free_ahash(crypto_ahash_reqtfm(req));
+	ahash_request_free(req);
 	return err;
 }
 EXPORT_SYMBOL(cfs_crypto_hash_final);
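cfs_crypto_hash_digest() doubles as a size probe: with a NULL (or too small) output buffer it stores the required length through hash_len and returns -ENOSPC. A hypothetical caller using that convention; the parameter order is as suggested by the hunks above, and NULL/0 for key selects the algorithm's built-in default key:

/* Sketch only: probe the digest size, then compute the digest. */
unsigned int hlen = 0;
unsigned char *hash;
int err;

err = cfs_crypto_hash_digest(alg_id, buf, buf_len, NULL, 0, NULL, &hlen);
if (err != -ENOSPC)
	return err;			/* a real failure */

hash = kmalloc(hlen, GFP_KERNEL);	/* hlen now holds cht_size */
if (!hash)
	return -ENOMEM;
err = cfs_crypto_hash_digest(alg_id, buf, buf_len, NULL, 0, hash, &hlen);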
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
index 2096d78913bd..8eac7cdd5f3e 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
@@ -9,6 +9,8 @@
 * more details.
 */

+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -18,7 +20,6 @@
 #include <linux/if_ether.h>
 #include <linux/if_arp.h>
 #include <linux/string.h>
-#include <linux/crypto.h>
 #include <linux/scatterlist.h>
 #include <linux/crc32.h>
 #include <linux/etherdevice.h>
@@ -48,10 +49,10 @@ struct rtllib_tkip_data {
 	u32 dot11RSNAStatsTKIPLocalMICFailures;

 	int key_idx;
-	struct crypto_blkcipher *rx_tfm_arc4;
-	struct crypto_hash *rx_tfm_michael;
-	struct crypto_blkcipher *tx_tfm_arc4;
-	struct crypto_hash *tx_tfm_michael;
+	struct crypto_skcipher *rx_tfm_arc4;
+	struct crypto_ahash *rx_tfm_michael;
+	struct crypto_skcipher *tx_tfm_arc4;
+	struct crypto_ahash *tx_tfm_michael;
 	/* scratch buffers for virt_to_page() (crypto API) */
 	u8 rx_hdr[16];
 	u8 tx_hdr[16];
@@ -65,32 +66,32 @@ static void *rtllib_tkip_init(int key_idx)
 	if (priv == NULL)
 		goto fail;
 	priv->key_idx = key_idx;
-	priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
-			CRYPTO_ALG_ASYNC);
+	priv->tx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
+						  CRYPTO_ALG_ASYNC);
 	if (IS_ERR(priv->tx_tfm_arc4)) {
 		pr_debug("Could not allocate crypto API arc4\n");
 		priv->tx_tfm_arc4 = NULL;
 		goto fail;
 	}

-	priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
-			CRYPTO_ALG_ASYNC);
+	priv->tx_tfm_michael = crypto_alloc_ahash("michael_mic", 0,
+						  CRYPTO_ALG_ASYNC);
 	if (IS_ERR(priv->tx_tfm_michael)) {
 		pr_debug("Could not allocate crypto API michael_mic\n");
 		priv->tx_tfm_michael = NULL;
 		goto fail;
 	}

-	priv->rx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
-			CRYPTO_ALG_ASYNC);
+	priv->rx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
+						  CRYPTO_ALG_ASYNC);
 	if (IS_ERR(priv->rx_tfm_arc4)) {
 		pr_debug("Could not allocate crypto API arc4\n");
 		priv->rx_tfm_arc4 = NULL;
 		goto fail;
 	}

-	priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
-			CRYPTO_ALG_ASYNC);
+	priv->rx_tfm_michael = crypto_alloc_ahash("michael_mic", 0,
+						  CRYPTO_ALG_ASYNC);
 	if (IS_ERR(priv->rx_tfm_michael)) {
 		pr_debug("Could not allocate crypto API michael_mic\n");
 		priv->rx_tfm_michael = NULL;
@@ -100,14 +101,10 @@ static void *rtllib_tkip_init(int key_idx)

 fail:
 	if (priv) {
-		if (priv->tx_tfm_michael)
-			crypto_free_hash(priv->tx_tfm_michael);
-		if (priv->tx_tfm_arc4)
-			crypto_free_blkcipher(priv->tx_tfm_arc4);
-		if (priv->rx_tfm_michael)
-			crypto_free_hash(priv->rx_tfm_michael);
-		if (priv->rx_tfm_arc4)
-			crypto_free_blkcipher(priv->rx_tfm_arc4);
+		crypto_free_ahash(priv->tx_tfm_michael);
+		crypto_free_skcipher(priv->tx_tfm_arc4);
+		crypto_free_ahash(priv->rx_tfm_michael);
+		crypto_free_skcipher(priv->rx_tfm_arc4);
 		kfree(priv);
 	}

@@ -120,14 +117,10 @@ static void rtllib_tkip_deinit(void *priv)
 	struct rtllib_tkip_data *_priv = priv;

 	if (_priv) {
-		if (_priv->tx_tfm_michael)
-			crypto_free_hash(_priv->tx_tfm_michael);
-		if (_priv->tx_tfm_arc4)
-			crypto_free_blkcipher(_priv->tx_tfm_arc4);
-		if (_priv->rx_tfm_michael)
-			crypto_free_hash(_priv->rx_tfm_michael);
-		if (_priv->rx_tfm_arc4)
-			crypto_free_blkcipher(_priv->rx_tfm_arc4);
+		crypto_free_ahash(_priv->tx_tfm_michael);
+		crypto_free_skcipher(_priv->tx_tfm_arc4);
+		crypto_free_ahash(_priv->rx_tfm_michael);
+		crypto_free_skcipher(_priv->rx_tfm_arc4);
 	}
 	kfree(priv);
 }
@@ -301,7 +294,6 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
 	struct rtllib_hdr_4addr *hdr;
 	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
 				    MAX_DEV_ADDR_SIZE);
-	struct blkcipher_desc desc = {.tfm = tkey->tx_tfm_arc4};
 	int ret = 0;
 	u8 rc4key[16],  *icv;
 	u32 crc;
@@ -347,6 +339,8 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
 	*pos++ = (tkey->tx_iv32 >> 24) & 0xff;

 	if (!tcb_desc->bHwSec) {
+		SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
+
 		icv = skb_put(skb, 4);
 		crc = ~crc32_le(~0, pos, len);
 		icv[0] = crc;
@@ -357,8 +351,12 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)

 		sg_init_one(&sg, pos, len+4);

-		crypto_blkcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
-		ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
+		crypto_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
+		skcipher_request_set_tfm(req, tkey->tx_tfm_arc4);
+		skcipher_request_set_callback(req, 0, NULL, NULL);
+		skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
+		ret = crypto_skcipher_encrypt(req);
+		skcipher_request_zero(req);
 	}

 	tkey->tx_iv16++;
@@ -384,12 +382,12 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 	struct rtllib_hdr_4addr *hdr;
 	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
 				    MAX_DEV_ADDR_SIZE);
-	struct blkcipher_desc desc = {.tfm = tkey->rx_tfm_arc4};
 	u8 rc4key[16];
 	u8 icv[4];
 	u32 crc;
 	struct scatterlist sg;
 	int plen;
+	int err;

 	if (skb->len < hdr_len + 8 + 4)
 		return -1;
@@ -425,6 +423,8 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 	pos += 8;

 	if (!tcb_desc->bHwSec || (skb->cb[0] == 1)) {
+		SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
+
 		if ((iv32 < tkey->rx_iv32 ||
 		    (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) &&
 		    tkey->initialized) {
@@ -450,8 +450,13 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)

 		sg_init_one(&sg, pos, plen+4);

-		crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
-		if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) {
+		crypto_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
+		skcipher_request_set_tfm(req, tkey->rx_tfm_arc4);
+		skcipher_request_set_callback(req, 0, NULL, NULL);
+		skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
+		err = crypto_skcipher_decrypt(req);
+		skcipher_request_zero(req);
+		if (err) {
 			if (net_ratelimit()) {
 				netdev_dbg(skb->dev,
 					   "Failed to decrypt received packet from %pM\n",
@@ -500,11 +505,12 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 }


-static int michael_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *hdr,
+static int michael_mic(struct crypto_ahash *tfm_michael, u8 *key, u8 *hdr,
 		       u8 *data, size_t data_len, u8 *mic)
 {
-	struct hash_desc desc;
+	AHASH_REQUEST_ON_STACK(req, tfm_michael);
 	struct scatterlist sg[2];
+	int err;

 	if (tfm_michael == NULL) {
 		pr_warn("michael_mic: tfm_michael == NULL\n");
@@ -514,12 +520,15 @@ static int michael_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *hdr,
 	sg_set_buf(&sg[0], hdr, 16);
 	sg_set_buf(&sg[1], data, data_len);

-	if (crypto_hash_setkey(tfm_michael, key, 8))
+	if (crypto_ahash_setkey(tfm_michael, key, 8))
 		return -1;

-	desc.tfm = tfm_michael;
-	desc.flags = 0;
-	return crypto_hash_digest(&desc, sg, data_len + 16, mic);
+	ahash_request_set_tfm(req, tfm_michael);
+	ahash_request_set_callback(req, 0, NULL, NULL);
+	ahash_request_set_crypt(req, sg, mic, data_len + 16);
+	err = crypto_ahash_digest(req);
+	ahash_request_zero(req);
+	return err;
 }

 static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
@@ -655,10 +664,10 @@ static int rtllib_tkip_set_key(void *key, int len, u8 *seq, void *priv)
 {
 	struct rtllib_tkip_data *tkey = priv;
 	int keyidx;
-	struct crypto_hash *tfm = tkey->tx_tfm_michael;
-	struct crypto_blkcipher *tfm2 = tkey->tx_tfm_arc4;
-	struct crypto_hash *tfm3 = tkey->rx_tfm_michael;
-	struct crypto_blkcipher *tfm4 = tkey->rx_tfm_arc4;
+	struct crypto_ahash *tfm = tkey->tx_tfm_michael;
+	struct crypto_skcipher *tfm2 = tkey->tx_tfm_arc4;
+	struct crypto_ahash *tfm3 = tkey->rx_tfm_michael;
+	struct crypto_skcipher *tfm4 = tkey->rx_tfm_arc4;

 	keyidx = tkey->key_idx;
 	memset(tkey, 0, sizeof(*tkey));
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_wep.c b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
index 21d7eee4c9a9..b3343a5d0fd6 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_wep.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
@@ -9,6 +9,7 @@
 * more details.
 */

+#include <crypto/skcipher.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -17,8 +18,6 @@
 #include <linux/string.h>
 #include "rtllib.h"

-#include <linux/crypto.h>
-
 #include <linux/scatterlist.h>
 #include <linux/crc32.h>

@@ -28,8 +27,8 @@ struct prism2_wep_data {
 	u8 key[WEP_KEY_LEN + 1];
 	u8 key_len;
 	u8 key_idx;
-	struct crypto_blkcipher *tx_tfm;
-	struct crypto_blkcipher *rx_tfm;
+	struct crypto_skcipher *tx_tfm;
+	struct crypto_skcipher *rx_tfm;
 };


@@ -42,13 +41,13 @@ static void *prism2_wep_init(int keyidx)
 		goto fail;
 	priv->key_idx = keyidx;

-	priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+	priv->tx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(priv->tx_tfm)) {
 		pr_debug("rtllib_crypt_wep: could not allocate crypto API arc4\n");
 		priv->tx_tfm = NULL;
 		goto fail;
 	}
-	priv->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+	priv->rx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(priv->rx_tfm)) {
 		pr_debug("rtllib_crypt_wep: could not allocate crypto API arc4\n");
 		priv->rx_tfm = NULL;
@@ -62,10 +61,8 @@ static void *prism2_wep_init(int keyidx)

 fail:
 	if (priv) {
-		if (priv->tx_tfm)
-			crypto_free_blkcipher(priv->tx_tfm);
-		if (priv->rx_tfm)
-			crypto_free_blkcipher(priv->rx_tfm);
+		crypto_free_skcipher(priv->tx_tfm);
+		crypto_free_skcipher(priv->rx_tfm);
 		kfree(priv);
 	}
 	return NULL;
@@ -77,10 +74,8 @@ static void prism2_wep_deinit(void *priv)
 	struct prism2_wep_data *_priv = priv;

 	if (_priv) {
-		if (_priv->tx_tfm)
-			crypto_free_blkcipher(_priv->tx_tfm);
-		if (_priv->rx_tfm)
-			crypto_free_blkcipher(_priv->rx_tfm);
+		crypto_free_skcipher(_priv->tx_tfm);
+		crypto_free_skcipher(_priv->rx_tfm);
 	}
 	kfree(priv);
 }
@@ -99,10 +94,10 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
 	u8 *pos;
 	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
 				    MAX_DEV_ADDR_SIZE);
-	struct blkcipher_desc desc = {.tfm = wep->tx_tfm};
 	u32 crc;
 	u8 *icv;
 	struct scatterlist sg;
+	int err;

 	if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 ||
 	    skb->len < hdr_len){
@@ -140,6 +135,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
 	memcpy(key + 3, wep->key, wep->key_len);

 	if (!tcb_desc->bHwSec) {
+		SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);

 		/* Append little-endian CRC32 and encrypt it to produce ICV */
 		crc = ~crc32_le(~0, pos, len);
@@ -150,8 +146,13 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
 		icv[3] = crc >> 24;

 		sg_init_one(&sg, pos, len+4);
-		crypto_blkcipher_setkey(wep->tx_tfm, key, klen);
-		return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
+		crypto_skcipher_setkey(wep->tx_tfm, key, klen);
+		skcipher_request_set_tfm(req, wep->tx_tfm);
+		skcipher_request_set_callback(req, 0, NULL, NULL);
+		skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
+		err = crypto_skcipher_encrypt(req);
+		skcipher_request_zero(req);
+		return err;
 	}

 	return 0;
@@ -173,10 +174,10 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 	u8 keyidx, *pos;
 	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
 				    MAX_DEV_ADDR_SIZE);
-	struct blkcipher_desc desc = {.tfm = wep->rx_tfm};
 	u32 crc;
 	u8 icv[4];
 	struct scatterlist sg;
+	int err;

 	if (skb->len < hdr_len + 8)
 		return -1;
@@ -198,9 +199,16 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 	plen = skb->len - hdr_len - 8;

 	if (!tcb_desc->bHwSec) {
+		SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
+
 		sg_init_one(&sg, pos, plen+4);
-		crypto_blkcipher_setkey(wep->rx_tfm, key, klen);
-		if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4))
+		crypto_skcipher_setkey(wep->rx_tfm, key, klen);
+		skcipher_request_set_tfm(req, wep->rx_tfm);
+		skcipher_request_set_callback(req, 0, NULL, NULL);
+		skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
+		err = crypto_skcipher_decrypt(req);
+		skcipher_request_zero(req);
+		if (err)
 			return -7;

 		crc = ~crc32_le(~0, pos, plen);
 		icv[0] = crc;
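The WEP/TKIP paths above re-key the same tfm for every packet and encrypt in place through a single scatterlist (source and destination are the same). A sketch of that per-packet shape, with hypothetical names:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int example_wep_crypt(struct crypto_skcipher *tfm,
			     const u8 *key, unsigned int klen,
			     u8 *payload, unsigned int plen)
{
	SKCIPHER_REQUEST_ON_STACK(req, tfm);
	struct scatterlist sg;
	int err;

	/* New key for every packet, as WEP derives it per frame. */
	err = crypto_skcipher_setkey(tfm, key, klen);
	if (err)
		return err;

	sg_init_one(&sg, payload, plen);
	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, &sg, &sg, plen, NULL);	/* in place */
	err = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);
	return err;
}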
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
index 908bc2eb4d29..6fa96d57d316 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
@@ -21,7 +21,8 @@

 #include "ieee80211.h"

-#include <linux/crypto.h>
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
 #include <linux/scatterlist.h>
 #include <linux/crc32.h>

@@ -52,10 +53,10 @@ struct ieee80211_tkip_data {

 	int key_idx;

-	struct crypto_blkcipher *rx_tfm_arc4;
-	struct crypto_hash *rx_tfm_michael;
-	struct crypto_blkcipher *tx_tfm_arc4;
-	struct crypto_hash *tx_tfm_michael;
+	struct crypto_skcipher *rx_tfm_arc4;
+	struct crypto_ahash *rx_tfm_michael;
+	struct crypto_skcipher *tx_tfm_arc4;
+	struct crypto_ahash *tx_tfm_michael;

 	/* scratch buffers for virt_to_page() (crypto API) */
 	u8 rx_hdr[16], tx_hdr[16];
@@ -70,7 +71,7 @@ static void *ieee80211_tkip_init(int key_idx)
 		goto fail;
 	priv->key_idx = key_idx;

-	priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
+	priv->tx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
 			CRYPTO_ALG_ASYNC);
 	if (IS_ERR(priv->tx_tfm_arc4)) {
 		printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
@@ -79,7 +80,7 @@ static void *ieee80211_tkip_init(int key_idx)
 		goto fail;
 	}

-	priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
+	priv->tx_tfm_michael = crypto_alloc_ahash("michael_mic", 0,
 			CRYPTO_ALG_ASYNC);
 	if (IS_ERR(priv->tx_tfm_michael)) {
 		printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
@@ -88,7 +89,7 @@ static void *ieee80211_tkip_init(int key_idx)
 		goto fail;
 	}

-	priv->rx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
+	priv->rx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
 			CRYPTO_ALG_ASYNC);
 	if (IS_ERR(priv->rx_tfm_arc4)) {
 		printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
@@ -97,7 +98,7 @@ static void *ieee80211_tkip_init(int key_idx)
 		goto fail;
 	}

-	priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
+	priv->rx_tfm_michael = crypto_alloc_ahash("michael_mic", 0,
 			CRYPTO_ALG_ASYNC);
 	if (IS_ERR(priv->rx_tfm_michael)) {
 		printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
@@ -110,14 +111,10 @@ static void *ieee80211_tkip_init(int key_idx)

 fail:
 	if (priv) {
-		if (priv->tx_tfm_michael)
-			crypto_free_hash(priv->tx_tfm_michael);
-		if (priv->tx_tfm_arc4)
-			crypto_free_blkcipher(priv->tx_tfm_arc4);
-		if (priv->rx_tfm_michael)
-			crypto_free_hash(priv->rx_tfm_michael);
-		if (priv->rx_tfm_arc4)
-			crypto_free_blkcipher(priv->rx_tfm_arc4);
+		crypto_free_ahash(priv->tx_tfm_michael);
+		crypto_free_skcipher(priv->tx_tfm_arc4);
+		crypto_free_ahash(priv->rx_tfm_michael);
+		crypto_free_skcipher(priv->rx_tfm_arc4);
 		kfree(priv);
 	}

@@ -130,14 +127,10 @@ static void ieee80211_tkip_deinit(void *priv)
 	struct ieee80211_tkip_data *_priv = priv;

 	if (_priv) {
-		if (_priv->tx_tfm_michael)
-			crypto_free_hash(_priv->tx_tfm_michael);
-		if (_priv->tx_tfm_arc4)
-			crypto_free_blkcipher(_priv->tx_tfm_arc4);
-		if (_priv->rx_tfm_michael)
-			crypto_free_hash(_priv->rx_tfm_michael);
-		if (_priv->rx_tfm_arc4)
-			crypto_free_blkcipher(_priv->rx_tfm_arc4);
+		crypto_free_ahash(_priv->tx_tfm_michael);
+		crypto_free_skcipher(_priv->tx_tfm_arc4);
+		crypto_free_ahash(_priv->rx_tfm_michael);
+		crypto_free_skcipher(_priv->rx_tfm_arc4);
 	}
 	kfree(priv);
 }
@@ -312,7 +305,6 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
 	u8 *pos;
 	struct rtl_80211_hdr_4addr *hdr;
 	cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
-	struct blkcipher_desc desc = {.tfm = tkey->tx_tfm_arc4};
 	int ret = 0;
 	u8 rc4key[16],  *icv;
 	u32 crc;
@@ -357,15 +349,21 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
 	*pos++ = (tkey->tx_iv32 >> 24) & 0xff;

 	if (!tcb_desc->bHwSec) {
+		SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
+
 		icv = skb_put(skb, 4);
 		crc = ~crc32_le(~0, pos, len);
 		icv[0] = crc;
 		icv[1] = crc >> 8;
 		icv[2] = crc >> 16;
 		icv[3] = crc >> 24;
-		crypto_blkcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
+		crypto_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
 		sg_init_one(&sg, pos, len+4);
-		ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
+		skcipher_request_set_tfm(req, tkey->tx_tfm_arc4);
+		skcipher_request_set_callback(req, 0, NULL, NULL);
+		skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
+		ret = crypto_skcipher_encrypt(req);
+		skcipher_request_zero(req);
 	}

 	tkey->tx_iv16++;
@@ -390,12 +388,12 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 	u16 iv16;
 	struct rtl_80211_hdr_4addr *hdr;
 	cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
-	struct blkcipher_desc desc = {.tfm = tkey->rx_tfm_arc4};
 	u8 rc4key[16];
 	u8 icv[4];
 	u32 crc;
 	struct scatterlist sg;
 	int plen;
+	int err;

 	if (skb->len < hdr_len + 8 + 4)
 		return -1;
@@ -429,6 +427,8 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 	pos += 8;

 	if (!tcb_desc->bHwSec) {
+		SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
+
 		if (iv32 < tkey->rx_iv32 ||
 		    (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) {
 			if (net_ratelimit()) {
@@ -449,10 +449,16 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)

 		plen = skb->len - hdr_len - 12;

-		crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
+		crypto_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
 		sg_init_one(&sg, pos, plen+4);
-		if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) {
+		skcipher_request_set_tfm(req, tkey->rx_tfm_arc4);
+		skcipher_request_set_callback(req, 0, NULL, NULL);
+		skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
+
+		err = crypto_skcipher_decrypt(req);
+		skcipher_request_zero(req);
+		if (err) {
 			if (net_ratelimit()) {
 				printk(KERN_DEBUG ": TKIP: failed to decrypt "
 				       "received packet from %pM\n",
@@ -501,11 +507,12 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 	return keyidx;
 }

-static int michael_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *hdr,
+static int michael_mic(struct crypto_ahash *tfm_michael, u8 *key, u8 *hdr,
 		       u8 *data, size_t data_len, u8 *mic)
 {
-	struct hash_desc desc;
+	AHASH_REQUEST_ON_STACK(req, tfm_michael);
 	struct scatterlist sg[2];
+	int err;

 	if (tfm_michael == NULL) {
 		printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n");
@@ -516,12 +523,15 @@ static int michael_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *hdr,
 	sg_set_buf(&sg[0], hdr, 16);
 	sg_set_buf(&sg[1], data, data_len);

-	if (crypto_hash_setkey(tfm_michael, key, 8))
+	if (crypto_ahash_setkey(tfm_michael, key, 8))
 		return -1;

-	desc.tfm = tfm_michael;
-	desc.flags = 0;
-	return crypto_hash_digest(&desc, sg, data_len + 16, mic);
+	ahash_request_set_tfm(req, tfm_michael);
+	ahash_request_set_callback(req, 0, NULL, NULL);
+	ahash_request_set_crypt(req, sg, mic, data_len + 16);
+	err = crypto_ahash_digest(req);
+	ahash_request_zero(req);
+	return err;
 }

 static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
@@ -660,10 +670,10 @@ static int ieee80211_tkip_set_key(void *key, int len, u8 *seq, void *priv)
 {
 	struct ieee80211_tkip_data *tkey = priv;
 	int keyidx;
-	struct crypto_hash *tfm = tkey->tx_tfm_michael;
-	struct crypto_blkcipher *tfm2 = tkey->tx_tfm_arc4;
-	struct crypto_hash *tfm3 = tkey->rx_tfm_michael;
-	struct crypto_blkcipher *tfm4 = tkey->rx_tfm_arc4;
+	struct crypto_ahash *tfm = tkey->tx_tfm_michael;
+	struct crypto_skcipher *tfm2 = tkey->tx_tfm_arc4;
+	struct crypto_ahash *tfm3 = tkey->rx_tfm_michael;
+	struct crypto_skcipher *tfm4 = tkey->rx_tfm_arc4;

 	keyidx = tkey->key_idx;
 	memset(tkey, 0, sizeof(*tkey));
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
index 681611dc93d3..ababb6de125b 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
@@ -18,7 +18,7 @@

 #include "ieee80211.h"

-#include <linux/crypto.h>
+#include <crypto/skcipher.h>
 #include <linux/scatterlist.h>
 #include <linux/crc32.h>

@@ -32,8 +32,8 @@ struct prism2_wep_data {
 	u8 key[WEP_KEY_LEN + 1];
 	u8 key_len;
 	u8 key_idx;
-	struct crypto_blkcipher *tx_tfm;
-	struct crypto_blkcipher *rx_tfm;
+	struct crypto_skcipher *tx_tfm;
+	struct crypto_skcipher *rx_tfm;
 };


@@ -46,10 +46,10 @@ static void *prism2_wep_init(int keyidx)
 		return NULL;
 	priv->key_idx = keyidx;

-	priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+	priv->tx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(priv->tx_tfm))
 		goto free_priv;
-	priv->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+	priv->rx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(priv->rx_tfm))
 		goto free_tx;

@@ -58,7 +58,7 @@ static void *prism2_wep_init(int keyidx)
 	return priv;

 free_tx:
-	crypto_free_blkcipher(priv->tx_tfm);
+	crypto_free_skcipher(priv->tx_tfm);
 free_priv:
 	kfree(priv);
 	return NULL;
@@ -70,10 +70,8 @@ static void prism2_wep_deinit(void *priv)
 	struct prism2_wep_data *_priv = priv;

 	if (_priv) {
-		if (_priv->tx_tfm)
-			crypto_free_blkcipher(_priv->tx_tfm);
-		if (_priv->rx_tfm)
-			crypto_free_blkcipher(_priv->rx_tfm);
+		crypto_free_skcipher(_priv->tx_tfm);
+		crypto_free_skcipher(_priv->rx_tfm);
 	}
 	kfree(priv);
 }
@@ -91,10 +89,10 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
 	u8 key[WEP_KEY_LEN + 3];
 	u8 *pos;
 	cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
-	struct blkcipher_desc desc = {.tfm = wep->tx_tfm};
 	u32 crc;
 	u8 *icv;
 	struct scatterlist sg;
+	int err;

 	if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 ||
 	    skb->len < hdr_len)
@@ -129,6 +127,8 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
 	memcpy(key + 3, wep->key, wep->key_len);

 	if (!tcb_desc->bHwSec) {
+		SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
+
 		/* Append little-endian CRC32 and encrypt it to produce ICV */
 		crc = ~crc32_le(~0, pos, len);
 		icv = skb_put(skb, 4);
@@ -137,10 +137,16 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
 		icv[2] = crc >> 16;
 		icv[3] = crc >> 24;

-		crypto_blkcipher_setkey(wep->tx_tfm, key, klen);
+		crypto_skcipher_setkey(wep->tx_tfm, key, klen);
 		sg_init_one(&sg, pos, len+4);
-		return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
+		skcipher_request_set_tfm(req, wep->tx_tfm);
+		skcipher_request_set_callback(req, 0, NULL, NULL);
+		skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
+
+		err = crypto_skcipher_encrypt(req);
+		skcipher_request_zero(req);
+		return err;
 	}

 	return 0;
@@ -161,10 +167,10 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 	u8 key[WEP_KEY_LEN + 3];
 	u8 keyidx, *pos;
 	cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
-	struct blkcipher_desc desc = {.tfm = wep->rx_tfm};
 	u32 crc;
 	u8 icv[4];
 	struct scatterlist sg;
+	int err;

 	if (skb->len < hdr_len + 8)
 		return -1;
@@ -186,10 +192,18 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 	plen = skb->len - hdr_len - 8;

 	if (!tcb_desc->bHwSec) {
-		crypto_blkcipher_setkey(wep->rx_tfm, key, klen);
+		SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
+
+		crypto_skcipher_setkey(wep->rx_tfm, key, klen);
 		sg_init_one(&sg, pos, plen+4);
-		if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4))
+		skcipher_request_set_tfm(req, wep->rx_tfm);
+		skcipher_request_set_callback(req, 0, NULL, NULL);
+		skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
+
+		err = crypto_skcipher_decrypt(req);
+		skcipher_request_zero(req);
+		if (err)
 			return -7;

 		crc = ~crc32_le(~0, pos, plen);
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 576a7a43470c..961202f4e9aa 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -16,9 +16,9 @@
 * GNU General Public License for more details.
 ******************************************************************************/

+#include <crypto/hash.h>
 #include <linux/string.h>
 #include <linux/kthread.h>
-#include <linux/crypto.h>
 #include <linux/completion.h>
 #include <linux/module.h>
 #include <linux/vmalloc.h>
@@ -1190,7 +1190,7 @@ iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 }

 static u32 iscsit_do_crypto_hash_sg(
-	struct hash_desc *hash,
+	struct ahash_request *hash,
 	struct iscsi_cmd *cmd,
 	u32 data_offset,
 	u32 data_length,
@@ -1201,7 +1201,7 @@ static u32 iscsit_do_crypto_hash_sg(
 	struct scatterlist *sg;
 	unsigned int page_off;

-	crypto_hash_init(hash);
+	crypto_ahash_init(hash);

 	sg = cmd->first_data_sg;
 	page_off = cmd->first_data_sg_off;
@@ -1209,7 +1209,8 @@ static u32 iscsit_do_crypto_hash_sg(
 	while (data_length) {
 		u32 cur_len = min_t(u32, data_length, (sg->length - page_off));

-		crypto_hash_update(hash, sg, cur_len);
+		ahash_request_set_crypt(hash, sg, NULL, cur_len);
+		crypto_ahash_update(hash);

 		data_length -= cur_len;
 		page_off = 0;
@@ -1221,33 +1222,34 @@ static u32 iscsit_do_crypto_hash_sg(
 		struct scatterlist pad_sg;

 		sg_init_one(&pad_sg, pad_bytes, padding);
-		crypto_hash_update(hash, &pad_sg, padding);
+		ahash_request_set_crypt(hash, &pad_sg, (u8 *)&data_crc,
+					padding);
+		crypto_ahash_finup(hash);
+	} else {
+		ahash_request_set_crypt(hash, NULL, (u8 *)&data_crc, 0);
+		crypto_ahash_final(hash);
 	}
-	crypto_hash_final(hash, (u8 *) &data_crc);

 	return data_crc;
 }

 static void iscsit_do_crypto_hash_buf(
-	struct hash_desc *hash,
+	struct ahash_request *hash,
 	const void *buf,
 	u32 payload_length,
 	u32 padding,
 	u8 *pad_bytes,
 	u8 *data_crc)
 {
-	struct scatterlist sg;
+	struct scatterlist sg[2];

-	crypto_hash_init(hash);
+	sg_init_table(sg, ARRAY_SIZE(sg));
+	sg_set_buf(sg, buf, payload_length);
+	sg_set_buf(sg + 1, pad_bytes, padding);

-	sg_init_one(&sg, buf, payload_length);
-	crypto_hash_update(hash, &sg, payload_length);
+	ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding);

-	if (padding) {
-		sg_init_one(&sg, pad_bytes, padding);
-		crypto_hash_update(hash, &sg, padding);
-	}
-	crypto_hash_final(hash, data_crc);
+	crypto_ahash_digest(hash);
 }
@@ -1422,7 +1424,7 @@ iscsit_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	if (conn->conn_ops->DataDigest) {
 		u32 data_crc;

-		data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
+		data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd,
 						    be32_to_cpu(hdr->offset),
 						    payload_length, padding,
 						    cmd->pad_bytes);
@@ -1682,7 +1684,7 @@ static int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 		}

 		if (conn->conn_ops->DataDigest) {
-			iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
+			iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
 					ping_data, payload_length,
 					padding, cmd->pad_bytes,
 					(u8 *)&data_crc);
@@ -2101,7 +2103,7 @@ iscsit_handle_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 			goto reject;

 		if (conn->conn_ops->DataDigest) {
-			iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
+			iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
 					text_in, payload_length,
 					padding, (u8 *)&pad_bytes,
 					(u8 *)&data_crc);
@@ -2440,7 +2442,7 @@ static int iscsit_handle_immediate_data(
 	if (conn->conn_ops->DataDigest) {
 		u32 data_crc;

-		data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
+		data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd,
 						    cmd->write_data_done, length, padding,
 						    cmd->pad_bytes);
@@ -2553,7 +2555,7 @@ static int iscsit_send_conn_drop_async_message(
 	if (conn->conn_ops->HeaderDigest) {
 		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

-		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
 				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);

 		cmd->tx_size += ISCSI_CRC_LEN;
@@ -2683,7 +2685,7 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 	if (conn->conn_ops->HeaderDigest) {
 		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

-		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
+		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
 				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);

 		iov[0].iov_len += ISCSI_CRC_LEN;
@@ -2711,7 +2713,7 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 				cmd->padding);
 	}
 	if (conn->conn_ops->DataDigest) {
-		cmd->data_crc = iscsit_do_crypto_hash_sg(&conn->conn_tx_hash, cmd,
+		cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash, cmd,
 			 datain.offset, datain.length, cmd->padding, cmd->pad_bytes);

 		iov[iov_count].iov_base	= &cmd->data_crc;
@@ -2857,7 +2859,7 @@ iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 	if (conn->conn_ops->HeaderDigest) {
 		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

-		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, &cmd->pdu[0],
+		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, &cmd->pdu[0],
 				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);

 		iov[0].iov_len += ISCSI_CRC_LEN;
@@ -2915,7 +2917,7 @@ static int iscsit_send_unsolicited_nopin(
 	if (conn->conn_ops->HeaderDigest) {
 		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

-		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
 				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);

 		tx_size += ISCSI_CRC_LEN;
@@ -2963,7 +2965,7 @@ iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 	if (conn->conn_ops->HeaderDigest) {
 		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

-		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
 				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);

 		iov[0].iov_len += ISCSI_CRC_LEN;
@@ -2993,7 +2995,7 @@ iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 			" padding bytes.\n", padding);
 	}
 	if (conn->conn_ops->DataDigest) {
-		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+		iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
 				cmd->buf_ptr, cmd->buf_ptr_size,
 				padding, (u8 *)&cmd->pad_bytes,
 				(u8 *)&cmd->data_crc);
@@ -3049,7 +3051,7 @@ static int iscsit_send_r2t(
 	if (conn->conn_ops->HeaderDigest) {
 		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

-		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
 				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);

 		cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
@@ -3239,7 +3241,7 @@ static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 		}

 		if (conn->conn_ops->DataDigest) {
-			iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+			iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
 				cmd->sense_buffer,
 				(cmd->se_cmd.scsi_sense_length + padding),
 				0, NULL, (u8 *)&cmd->data_crc);
@@ -3262,7 +3264,7 @@ static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 	if (conn->conn_ops->HeaderDigest) {
 		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

-		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
+		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
 				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);

 		iov[0].iov_len += ISCSI_CRC_LEN;
@@ -3332,7 +3334,7 @@ iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 	if (conn->conn_ops->HeaderDigest) {
 		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

-		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
 				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);

 		cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
@@ -3601,7 +3603,7 @@ static int iscsit_send_text_rsp(
 	if (conn->conn_ops->HeaderDigest) {
 		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

-		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
 				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);

 		iov[0].iov_len += ISCSI_CRC_LEN;
@@ -3611,7 +3613,7 @@ static int iscsit_send_text_rsp(
 	}

 	if (conn->conn_ops->DataDigest) {
-		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+		iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
 				cmd->buf_ptr, text_length,
 				0, NULL, (u8 *)&cmd->data_crc);

@@ -3668,7 +3670,7 @@ static int iscsit_send_reject(
 	if (conn->conn_ops->HeaderDigest) {
 		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

-		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
 				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);

 		iov[0].iov_len += ISCSI_CRC_LEN;
@@ -3678,7 +3680,7 @@ static int iscsit_send_reject(
 	}

 	if (conn->conn_ops->DataDigest) {
-		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->buf_ptr,
+		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->buf_ptr,
 				ISCSI_HDR_LEN, 0, NULL, (u8 *)&cmd->data_crc);

 		iov[iov_count].iov_base = &cmd->data_crc;
@@ -4145,7 +4147,7 @@ int iscsi_target_rx_thread(void *arg)
 			goto transport_err;
 		}

-		iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
+		iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
 				buffer, ISCSI_HDR_LEN,
 				0, NULL, (u8 *)&checksum);

@@ -4359,10 +4361,14 @@ int iscsit_close_connection(
 	 */
 	iscsit_check_conn_usage_count(conn);

-	if (conn->conn_rx_hash.tfm)
-		crypto_free_hash(conn->conn_rx_hash.tfm);
-	if (conn->conn_tx_hash.tfm)
-		crypto_free_hash(conn->conn_tx_hash.tfm);
+	ahash_request_free(conn->conn_tx_hash);
+	if (conn->conn_rx_hash) {
+		struct crypto_ahash *tfm;
+
+		tfm = crypto_ahash_reqtfm(conn->conn_rx_hash);
+		ahash_request_free(conn->conn_rx_hash);
+		crypto_free_ahash(tfm);
+	}

 	free_cpumask_var(conn->conn_cpumask);
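The iscsi_target conversion tightens the digest helpers: iscsit_do_crypto_hash_sg() folds the trailing pad into the last step with crypto_ahash_finup(), and iscsit_do_crypto_hash_buf() collapses init/update/update/final into one digest over a two-entry scatterlist. A sketch of the buffer variant; names are illustrative and, as in the hunk, the pad entry is populated even when padding is zero, relying on the total length to bound the walk:

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static int example_digest_buf(struct ahash_request *req,
			      const void *buf, u32 len,
			      u8 *pad, u32 padlen, u8 *out)
{
	struct scatterlist sg[2];

	sg_init_table(sg, ARRAY_SIZE(sg));
	sg_set_buf(sg, buf, len);
	sg_set_buf(sg + 1, pad, padlen);	/* may describe 0 bytes */

	/* One pass over payload + pad; out receives the digest. */
	ahash_request_set_crypt(req, sg, out, len + padlen);
	return crypto_ahash_digest(req);
}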
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 47e249dccb5f..667406fcf4d3 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -16,9 +16,9 @@
 * GNU General Public License for more details.
 ******************************************************************************/

+#include <crypto/hash.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
-#include <linux/crypto.h>
 #include <linux/err.h>
 #include <linux/scatterlist.h>

@@ -185,9 +185,8 @@ static int chap_server_compute_md5(
 	unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
 	size_t compare_len;
 	struct iscsi_chap *chap = conn->auth_protocol;
-	struct crypto_hash *tfm;
-	struct hash_desc desc;
-	struct scatterlist sg;
+	struct crypto_shash *tfm = NULL;
+	struct shash_desc *desc = NULL;
 	int auth_ret = -1, ret, challenge_len;

 	memset(identifier, 0, 10);
@@ -245,52 +244,47 @@ static int chap_server_compute_md5(
 	pr_debug("[server] Got CHAP_R=%s\n", chap_r);
 	chap_string_to_hex(client_digest, chap_r, strlen(chap_r));

-	tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+	tfm = crypto_alloc_shash("md5", 0, 0);
 	if (IS_ERR(tfm)) {
-		pr_err("Unable to allocate struct crypto_hash\n");
+		tfm = NULL;
+		pr_err("Unable to allocate struct crypto_shash\n");
 		goto out;
 	}
-	desc.tfm = tfm;
-	desc.flags = 0;

-	ret = crypto_hash_init(&desc);
-	if (ret < 0) {
-		pr_err("crypto_hash_init() failed\n");
-		crypto_free_hash(tfm);
+	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
+	if (!desc) {
+		pr_err("Unable to allocate struct shash_desc\n");
 		goto out;
 	}

-	sg_init_one(&sg, &chap->id, 1);
-	ret = crypto_hash_update(&desc, &sg, 1);
+	desc->tfm = tfm;
+	desc->flags = 0;
+
+	ret = crypto_shash_init(desc);
 	if (ret < 0) {
-		pr_err("crypto_hash_update() failed for id\n");
-		crypto_free_hash(tfm);
+		pr_err("crypto_shash_init() failed\n");
 		goto out;
 	}

-	sg_init_one(&sg, &auth->password, strlen(auth->password));
-	ret = crypto_hash_update(&desc, &sg, strlen(auth->password));
+	ret = crypto_shash_update(desc, &chap->id, 1);
 	if (ret < 0) {
-		pr_err("crypto_hash_update() failed for password\n");
-		crypto_free_hash(tfm);
+		pr_err("crypto_shash_update() failed for id\n");
 		goto out;
 	}

-	sg_init_one(&sg, chap->challenge, CHAP_CHALLENGE_LENGTH);
-	ret = crypto_hash_update(&desc, &sg, CHAP_CHALLENGE_LENGTH);
+	ret = crypto_shash_update(desc, (char *)&auth->password,
+				  strlen(auth->password));
 	if (ret < 0) {
-		pr_err("crypto_hash_update() failed for challenge\n");
-		crypto_free_hash(tfm);
+		pr_err("crypto_shash_update() failed for password\n");
 		goto out;
 	}

-	ret = crypto_hash_final(&desc, server_digest);
+	ret = crypto_shash_finup(desc, chap->challenge,
+				 CHAP_CHALLENGE_LENGTH, server_digest);
 	if (ret < 0) {
-		pr_err("crypto_hash_final() failed for server digest\n");
-		crypto_free_hash(tfm);
+		pr_err("crypto_shash_finup() failed for challenge\n");
 		goto out;
 	}
-	crypto_free_hash(tfm);

 	chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
 	pr_debug("[server] MD5 Server Digest: %s\n", response);
@@ -306,9 +300,8 @@ static int chap_server_compute_md5(
 	 * authentication is not enabled.
 	 */
 	if (!auth->authenticate_target) {
-		kfree(challenge);
-		kfree(challenge_binhex);
-		return 0;
+		auth_ret = 0;
+		goto out;
 	}
 	/*
 	 * Get CHAP_I.
 	 */
@@ -372,58 +365,37 @@ static int chap_server_compute_md5(
 	/*
 	 * Generate CHAP_N and CHAP_R for mutual authentication.
 	 */
-	tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(tfm)) {
-		pr_err("Unable to allocate struct crypto_hash\n");
-		goto out;
-	}
-	desc.tfm = tfm;
-	desc.flags = 0;
-
-	ret = crypto_hash_init(&desc);
+	ret = crypto_shash_init(desc);
 	if (ret < 0) {
-		pr_err("crypto_hash_init() failed\n");
-		crypto_free_hash(tfm);
+		pr_err("crypto_shash_init() failed\n");
 		goto out;
 	}

 	/* To handle both endiannesses */
 	id_as_uchar = id;
-	sg_init_one(&sg, &id_as_uchar, 1);
-	ret = crypto_hash_update(&desc, &sg, 1);
+	ret = crypto_shash_update(desc, &id_as_uchar, 1);
 	if (ret < 0) {
-		pr_err("crypto_hash_update() failed for id\n");
-		crypto_free_hash(tfm);
+		pr_err("crypto_shash_update() failed for id\n");
 		goto out;
 	}

-	sg_init_one(&sg, auth->password_mutual,
-		    strlen(auth->password_mutual));
-	ret = crypto_hash_update(&desc, &sg, strlen(auth->password_mutual));
+	ret = crypto_shash_update(desc, auth->password_mutual,
+				  strlen(auth->password_mutual));
 	if (ret < 0) {
-		pr_err("crypto_hash_update() failed for"
+		pr_err("crypto_shash_update() failed for"
 				" password_mutual\n");
-		crypto_free_hash(tfm);
 		goto out;
 	}
 	/*
 	 * Convert received challenge to binary hex.
 	 */
-	sg_init_one(&sg, challenge_binhex, challenge_len);
-	ret = crypto_hash_update(&desc, &sg, challenge_len);
+	ret = crypto_shash_finup(desc, challenge_binhex, challenge_len,
+				 digest);
 	if (ret < 0) {
-		pr_err("crypto_hash_update() failed for ma challenge\n");
-		crypto_free_hash(tfm);
+		pr_err("crypto_shash_finup() failed for ma challenge\n");
 		goto out;
 	}

-	ret = crypto_hash_final(&desc, digest);
-	if (ret < 0) {
-		pr_err("crypto_hash_final() failed for ma digest\n");
-		crypto_free_hash(tfm);
-		goto out;
-	}
-	crypto_free_hash(tfm);
-
 	/*
 	 * Generate CHAP_N and CHAP_R.
 	 */
@@ -440,6 +412,8 @@ static int chap_server_compute_md5(
 	pr_debug("[server] Sending CHAP_R=0x%s\n", response);
 	auth_ret = 0;
 out:
+	kzfree(desc);
+	crypto_free_shash(tfm);
 	kfree(challenge);
 	kfree(challenge_binhex);
 	return auth_ret;
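Where the descriptor has to live across many steps and two hash computations, as in chap_server_compute_md5(), it is sized at run time from crypto_shash_descsize() instead of being placed on the stack, and released with kzfree() so intermediate hash state is wiped. A sketch of that allocation shape:

#include <crypto/hash.h>
#include <linux/slab.h>

static struct shash_desc *example_desc_alloc(struct crypto_shash *tfm)
{
	struct shash_desc *desc;

	/* Descriptor = fixed header + algorithm-specific state. */
	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
		       GFP_KERNEL);
	if (!desc)
		return NULL;

	desc->tfm = tfm;
	desc->flags = 0;
	return desc;
	/* Free with kzfree(desc) so the hash state is cleared. */
}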
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 96e78c823d13..8436d56c5f0c 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -16,9 +16,9 @@
 * GNU General Public License for more details.
 ******************************************************************************/

+#include <crypto/hash.h>
 #include <linux/string.h>
 #include <linux/kthread.h>
-#include <linux/crypto.h>
 #include <linux/idr.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
@@ -115,27 +115,36 @@ out_login:
 */
 int iscsi_login_setup_crypto(struct iscsi_conn *conn)
 {
+	struct crypto_ahash *tfm;
+
 	/*
 	 * Setup slicing by CRC32C algorithm for RX and TX libcrypto contexts
 	 * which will default to crc32c_intel.ko for cpu_has_xmm4_2, or fallback
 	 * to software 1x8 byte slicing from crc32c.ko
 	 */
-	conn->conn_rx_hash.flags = 0;
-	conn->conn_rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
-						CRYPTO_ALG_ASYNC);
-	if (IS_ERR(conn->conn_rx_hash.tfm)) {
-		pr_err("crypto_alloc_hash() failed for conn_rx_tfm\n");
+	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm)) {
+		pr_err("crypto_alloc_ahash() failed\n");
 		return -ENOMEM;
 	}

-	conn->conn_tx_hash.flags = 0;
-	conn->conn_tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
-						CRYPTO_ALG_ASYNC);
-	if (IS_ERR(conn->conn_tx_hash.tfm)) {
-		pr_err("crypto_alloc_hash() failed for conn_tx_tfm\n");
-		crypto_free_hash(conn->conn_rx_hash.tfm);
+	conn->conn_rx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
+	if (!conn->conn_rx_hash) {
+		pr_err("ahash_request_alloc() failed for conn_rx_hash\n");
+		crypto_free_ahash(tfm);
+		return -ENOMEM;
+	}
+	ahash_request_set_callback(conn->conn_rx_hash, 0, NULL, NULL);
+
+	conn->conn_tx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
+	if (!conn->conn_tx_hash) {
+		pr_err("ahash_request_alloc() failed for conn_tx_hash\n");
+		ahash_request_free(conn->conn_rx_hash);
+		conn->conn_rx_hash = NULL;
+		crypto_free_ahash(tfm);
 		return -ENOMEM;
 	}
+	ahash_request_set_callback(conn->conn_tx_hash, 0, NULL, NULL);

 	return 0;
 }
@@ -1174,10 +1183,14 @@ old_sess_out:
 		iscsit_dec_session_usage_count(conn->sess);
 	}

-	if (!IS_ERR(conn->conn_rx_hash.tfm))
-		crypto_free_hash(conn->conn_rx_hash.tfm);
-	if (!IS_ERR(conn->conn_tx_hash.tfm))
-		crypto_free_hash(conn->conn_tx_hash.tfm);
+	ahash_request_free(conn->conn_tx_hash);
+	if (conn->conn_rx_hash) {
+		struct crypto_ahash *tfm;
+
+		tfm = crypto_ahash_reqtfm(conn->conn_rx_hash);
+		ahash_request_free(conn->conn_rx_hash);
+		crypto_free_ahash(tfm);
+	}

 	free_cpumask_var(conn->conn_cpumask);
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c
index 50ce80d604f3..8ed8e34c3492 100644
--- a/drivers/usb/wusbcore/crypto.c
+++ b/drivers/usb/wusbcore/crypto.c
@@ -45,6 +45,7 @@
 * funneled through AES are...16 bytes in size!
 */

+#include <crypto/skcipher.h>
 #include <linux/crypto.h>
 #include <linux/module.h>
 #include <linux/err.h>
@@ -195,21 +196,22 @@ static void bytewise_xor(void *_bo, const void *_bi1, const void *_bi2,
 * NOTE: blen is not aligned to a block size, we'll pad zeros, that's
 *       what sg[4] is for. Maybe there is a smarter way to do this.
 */
-static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc,
+static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
 			struct crypto_cipher *tfm_aes, void *mic,
 			const struct aes_ccm_nonce *n,
 			const struct aes_ccm_label *a, const void *b,
 			size_t blen)
 {
 	int result = 0;
-	struct blkcipher_desc desc;
+	SKCIPHER_REQUEST_ON_STACK(req, tfm_cbc);
 	struct aes_ccm_b0 b0;
 	struct aes_ccm_b1 b1;
 	struct aes_ccm_a ax;
 	struct scatterlist sg[4], sg_dst;
-	void *iv, *dst_buf;
-	size_t ivsize, dst_size;
+	void *dst_buf;
+	size_t dst_size;
 	const u8 bzero[16] = { 0 };
+	u8 iv[crypto_skcipher_ivsize(tfm_cbc)];
 	size_t zero_padding;

 	/*
@@ -232,9 +234,7 @@ static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc,
 		goto error_dst_buf;
 	}

-	iv = crypto_blkcipher_crt(tfm_cbc)->iv;
-	ivsize = crypto_blkcipher_ivsize(tfm_cbc);
-	memset(iv, 0, ivsize);
+	memset(iv, 0, sizeof(iv));

 	/* Setup B0 */
 	b0.flags = 0x59;	/* Format B0 */
@@ -259,9 +259,11 @@ static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc,
 	sg_set_buf(&sg[3], bzero, zero_padding);
 	sg_init_one(&sg_dst, dst_buf, dst_size);

-	desc.tfm = tfm_cbc;
-	desc.flags = 0;
-	result = crypto_blkcipher_encrypt(&desc, &sg_dst, sg, dst_size);
+	skcipher_request_set_tfm(req, tfm_cbc);
+	skcipher_request_set_callback(req, 0, NULL, NULL);
+	skcipher_request_set_crypt(req, sg, &sg_dst, dst_size, iv);
+	result = crypto_skcipher_encrypt(req);
+	skcipher_request_zero(req);
 	if (result < 0) {
 		printk(KERN_ERR "E: can't compute CBC-MAC tag (MIC): %d\n",
 		       result);
@@ -301,18 +303,18 @@ ssize_t wusb_prf(void *out, size_t out_size,
 {
 	ssize_t result, bytes = 0, bitr;
 	struct aes_ccm_nonce n = *_n;
-	struct crypto_blkcipher *tfm_cbc;
+	struct crypto_skcipher *tfm_cbc;
 	struct crypto_cipher *tfm_aes;
 	u64 sfn = 0;
 	__le64 sfn_le;

-	tfm_cbc = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
+	tfm_cbc = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(tfm_cbc)) {
 		result = PTR_ERR(tfm_cbc);
 		printk(KERN_ERR "E: can't load CBC(AES): %d\n", (int)result);
 		goto error_alloc_cbc;
 	}

-	result = crypto_blkcipher_setkey(tfm_cbc, key, 16);
+	result = crypto_skcipher_setkey(tfm_cbc, key, 16);
 	if (result < 0) {
 		printk(KERN_ERR "E: can't set CBC key: %d\n", (int)result);
 		goto error_setkey_cbc;
@@ -345,7 +347,7 @@ error_setkey_aes:
 	crypto_free_cipher(tfm_aes);
 error_alloc_aes:
 error_setkey_cbc:
-	crypto_free_blkcipher(tfm_cbc);
+	crypto_free_skcipher(tfm_cbc);
 error_alloc_cbc:
 	return result;
 }
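wusb_ccm_mac() also shows the IV change that comes with this conversion: blkcipher kept the IV inside the tfm (crypto_blkcipher_crt(tfm)->iv), while skcipher takes a caller-owned IV with every request. A sketch of that shape with a fixed 16-byte AES-CBC IV; the hunk above instead sizes the buffer with crypto_skcipher_ivsize(), and src/dst are whatever scatterlists the caller prepared:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int example_cbc(struct crypto_skcipher *tfm,
		       struct scatterlist *src, struct scatterlist *dst,
		       unsigned int len)
{
	SKCIPHER_REQUEST_ON_STACK(req, tfm);
	u8 iv[16] = { 0 };	/* was hidden inside the blkcipher tfm */
	int err;

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, len, iv);
	err = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);
	return err;
}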