path: root/include/net/tls.h
Diffstat (limited to 'include/net/tls.h')
-rw-r--r--  include/net/tls.h  131
1 file changed, 102 insertions(+), 29 deletions(-)
diff --git a/include/net/tls.h b/include/net/tls.h
index 53d96bca220d..0279938386ab 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -40,6 +40,7 @@
#include <linux/socket.h>
#include <linux/tcp.h>
#include <linux/skmsg.h>
+#include <linux/netdevice.h>
#include <net/tcp.h>
#include <net/strparser.h>
@@ -61,6 +62,7 @@
#define TLS_DEVICE_NAME_MAX 32
#define MAX_IV_SIZE 16
+#define TLS_MAX_REC_SEQ_SIZE 8
/* For AES-CCM, the full 16 bytes of IV are made of 4 fields of given sizes.
*
@@ -197,20 +199,24 @@ struct tls_offload_context_tx {
struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
void (*sk_destruct)(struct sock *sk);
- u8 driver_state[];
+ u8 driver_state[] __aligned(8);
/* The TLS layer reserves room for driver specific state
* Currently the belief is that there is not enough
* driver specific state to justify another layer of indirection
*/
-#define TLS_DRIVER_STATE_SIZE (max_t(size_t, 8, sizeof(void *)))
+#define TLS_DRIVER_STATE_SIZE_TX 16
};
#define TLS_OFFLOAD_CONTEXT_SIZE_TX \
- (ALIGN(sizeof(struct tls_offload_context_tx), sizeof(void *)) + \
- TLS_DRIVER_STATE_SIZE)
+ (sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)
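The driver scratch area is now a fixed, direction-specific constant (16 bytes for TX) with explicit 8-byte alignment, replacing the pointer-size-derived TLS_DRIVER_STATE_SIZE. A minimal sketch of how a driver might guard its private TX state against the new limit, assuming <net/tls.h> and <linux/build_bug.h> are included; struct mydrv_tx_state and its fields are hypothetical:

struct mydrv_tx_state {			/* hypothetical driver TX scratch */
	u64 next_rec_sn;		/* record sequence programmed in HW */
	u32 swid;			/* hypothetical HW stream id */
	u32 flags;
};

/* fails the build if the state outgrows the 16 bytes of driver_state[] */
static inline void mydrv_tx_state_size_check(void)
{
	BUILD_BUG_ON(sizeof(struct mydrv_tx_state) > TLS_DRIVER_STATE_SIZE_TX);
}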
enum tls_context_flags {
TLS_RX_SYNC_RUNNING = 0,
+ /* Unlike RX, where resync is driven entirely by the core, in TX only
+ * the driver knows when things went out of sync, so we need the flag
+ * to be atomic.
+ */
+ TLS_TX_SYNC_SCHED = 1,
};
struct cipher_context {
@@ -240,34 +246,32 @@ struct tls_prot_info {
};
struct tls_context {
+ /* read-only cache line */
struct tls_prot_info prot_info;
- union tls_crypto_context crypto_send;
- union tls_crypto_context crypto_recv;
+ u8 tx_conf:3;
+ u8 rx_conf:3;
- struct list_head list;
- struct net_device *netdev;
- refcount_t refcount;
+ int (*push_pending_record)(struct sock *sk, int flags);
+ void (*sk_write_space)(struct sock *sk);
void *priv_ctx_tx;
void *priv_ctx_rx;
- u8 tx_conf:3;
- u8 rx_conf:3;
+ struct net_device *netdev;
+ /* rw cache line */
struct cipher_context tx;
struct cipher_context rx;
struct scatterlist *partially_sent_record;
u16 partially_sent_offset;
- unsigned long flags;
bool in_tcp_sendpages;
bool pending_open_record_frags;
+ unsigned long flags;
- int (*push_pending_record)(struct sock *sk, int flags);
-
- void (*sk_write_space)(struct sock *sk);
+ /* cache cold stuff */
void (*sk_destruct)(struct sock *sk);
void (*sk_proto_close)(struct sock *sk, long timeout);
@@ -279,6 +283,12 @@ struct tls_context {
int __user *optlen);
int (*hash)(struct sock *sk);
void (*unhash)(struct sock *sk);
+
+ union tls_crypto_context crypto_send;
+ union tls_crypto_context crypto_recv;
+
+ struct list_head list;
+ refcount_t refcount;
};
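The reordering groups the members read on every record (prot_info, the conf bits, the ctx pointers, netdev) at the front, keeps the frequently written TX/RX state behind them, and pushes rarely touched members (crypto parameters, list, refcount) to the tail. A small sketch to inspect the resulting offsets, assuming a file that already includes <net/tls.h>; exact numbers depend on architecture and config:

static void tls_ctx_layout_dbg(void)
{
	pr_debug("tls_context: rw part @%zu, cold part @%zu, size %zu\n",
		 offsetof(struct tls_context, tx),
		 offsetof(struct tls_context, sk_destruct),
		 sizeof(struct tls_context));
}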
enum tls_offload_ctx_dir {
@@ -294,24 +304,48 @@ struct tlsdev_ops {
void (*tls_dev_del)(struct net_device *netdev,
struct tls_context *ctx,
enum tls_offload_ctx_dir direction);
- void (*tls_dev_resync_rx)(struct net_device *netdev,
- struct sock *sk, u32 seq, u64 rcd_sn);
+ void (*tls_dev_resync)(struct net_device *netdev,
+ struct sock *sk, u32 seq, u8 *rcd_sn,
+ enum tls_offload_ctx_dir direction);
};
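The RX-only resync hook becomes a single tls_dev_resync callback that carries the direction and hands over the record sequence as a byte array (presumably the raw, big-endian rec_seq of at most TLS_MAX_REC_SEQ_SIZE bytes) rather than a u64. A hedged sketch of a driver-side implementation; mydrv_tls_resync is hypothetical and get_unaligned_be64() comes from <asm/unaligned.h>:

static void mydrv_tls_resync(struct net_device *netdev, struct sock *sk,
			     u32 seq, u8 *rcd_sn,
			     enum tls_offload_ctx_dir direction)
{
	/* assumption: rcd_sn holds the 8-byte record sequence in network order */
	u64 rec_sn = get_unaligned_be64(rcd_sn);

	if (direction == TLS_OFFLOAD_CTX_DIR_RX) {
		/* reprogram RX decryption state at (tcp seq, record sn) */
		netdev_dbg(netdev, "RX resync: seq %u rec %llu\n", seq, rec_sn);
	} else {
		/* restart TX offload from the record starting at seq */
		netdev_dbg(netdev, "TX resync: seq %u rec %llu\n", seq, rec_sn);
	}
}

A driver would point .tls_dev_resync of its tlsdev_ops at such a function and drop its old .tls_dev_resync_rx implementation.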
+enum tls_offload_sync_type {
+ TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0,
+ TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1,
+};
+
+#define TLS_DEVICE_RESYNC_NH_START_IVAL 2
+#define TLS_DEVICE_RESYNC_NH_MAX_IVAL 128
+
struct tls_offload_context_rx {
/* sw must be the first member of tls_offload_context_rx */
struct tls_sw_context_rx sw;
- atomic64_t resync_req;
- u8 driver_state[];
+ enum tls_offload_sync_type resync_type;
+ /* this member is set regardless of resync_type, to avoid branches */
+ u8 resync_nh_reset:1;
+ /* CORE_NEXT_HINT-only member, but use the hole here */
+ u8 resync_nh_do_now:1;
+ union {
+ /* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ */
+ struct {
+ atomic64_t resync_req;
+ };
+ /* TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT */
+ struct {
+ u32 decrypted_failed;
+ u32 decrypted_tgt;
+ } resync_nh;
+ };
+ u8 driver_state[] __aligned(8);
/* The TLS layer reserves room for driver specific state
* Currently the belief is that there is not enough
* driver specific state to justify another layer of indirection
*/
+#define TLS_DRIVER_STATE_SIZE_RX 8
};
#define TLS_OFFLOAD_CONTEXT_SIZE_RX \
- (ALIGN(sizeof(struct tls_offload_context_rx), sizeof(void *)) + \
- TLS_DRIVER_STATE_SIZE)
+ (sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)
int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
@@ -431,19 +465,15 @@ static inline struct tls_context *tls_get_ctx(const struct sock *sk)
}
static inline void tls_advance_record_sn(struct sock *sk,
- struct cipher_context *ctx,
- int version)
+ struct tls_prot_info *prot,
+ struct cipher_context *ctx)
{
- struct tls_context *tls_ctx = tls_get_ctx(sk);
- struct tls_prot_info *prot = &tls_ctx->prot_info;
-
if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
tls_err_abort(sk, EBADMSG);
- if (version != TLS_1_3_VERSION) {
+ if (prot->version != TLS_1_3_VERSION)
tls_bigint_increment(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
prot->iv_size);
- }
}
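tls_advance_record_sn() no longer looks up the context and prot_info itself; callers resolve them once and pass them in. Roughly, for a hypothetical TX-side caller:

static void example_finish_record(struct sock *sk)	/* hypothetical */
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;

	/* bumps rec_seq and, before TLS 1.3, the explicit part of the IV */
	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
}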
static inline void tls_fill_prepend(struct tls_context *ctx,
@@ -545,6 +575,23 @@ tls_offload_ctx_rx(const struct tls_context *tls_ctx)
return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+static inline void *__tls_driver_ctx(struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction)
+{
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX)
+ return tls_offload_ctx_tx(tls_ctx)->driver_state;
+ else
+ return tls_offload_ctx_rx(tls_ctx)->driver_state;
+}
+
+static inline void *
+tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
+{
+ return __tls_driver_ctx(tls_get_ctx(sk), direction);
+}
+#endif
+
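With these accessors a driver no longer computes the offset of its scratch area by hand. A hypothetical RX-side use that fits the 8 bytes reserved by TLS_DRIVER_STATE_SIZE_RX:

struct mydrv_rx_state {			/* hypothetical, must fit in 8 bytes */
	u32 hw_flow_id;
	u32 fence_seq;
};

static struct mydrv_rx_state *mydrv_rx_state(struct sock *sk)
{
	BUILD_BUG_ON(sizeof(struct mydrv_rx_state) > TLS_DRIVER_STATE_SIZE_RX);
	return tls_driver_ctx(sk, TLS_OFFLOAD_CTX_DIR_RX);
}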
/* The TLS context is valid until sk_destruct is called */
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
@@ -554,6 +601,31 @@ static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | 1);
}
+static inline void
+tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+
+ tls_offload_ctx_rx(tls_ctx)->resync_type = type;
+}
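A device that cannot pinpoint the resync position itself can ask the core to drive RX resync from decryption failures by selecting the next-hint mode, typically when RX offload is installed. Sketch; mydrv_tls_add_rx is hypothetical:

static int mydrv_tls_add_rx(struct net_device *netdev, struct sock *sk)
{
	/* core-driven resync: the stack watches decryption failures and asks
	 * for the next record hint, presumably with an interval growing from
	 * TLS_DEVICE_RESYNC_NH_START_IVAL toward TLS_DEVICE_RESYNC_NH_MAX_IVAL
	 */
	tls_offload_rx_resync_set_type(sk, TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT);
	return 0;
}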
+
+static inline void tls_offload_tx_resync_request(struct sock *sk)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+
+ WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
+}
+
+/* Driver's seq tracking has to be disabled until the resync succeeds */
+static inline bool tls_offload_tx_resync_pending(struct sock *sk)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ bool ret;
+
+ ret = test_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
+ smp_mb__after_atomic();
+ return ret;
+}
int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
unsigned char *record_type);
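On TX only the driver can notice that its sequence tracking went stale (hence the atomic TLS_TX_SYNC_SCHED flag above), so the expected flow is: the driver calls tls_offload_tx_resync_request(), keeps packets on the SW fallback while tls_offload_tx_resync_pending() is set, and presumably gets the new stream position back through ->tls_dev_resync() with TLS_OFFLOAD_CTX_DIR_TX. A hypothetical xmit-path check:

static bool mydrv_tls_tx_can_offload(struct sock *sk, bool hw_in_sync)
{
	/* HW state no longer matches the TCP stream: schedule a TX resync */
	if (!hw_in_sync)
		tls_offload_tx_resync_request(sk);

	/* don't trust the driver's own seq tracking until the resync is done */
	return !tls_offload_tx_resync_pending(sk);
}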
@@ -562,6 +634,7 @@ void tls_unregister_device(struct tls_device *device);
int tls_device_decrypted(struct sock *sk, struct sk_buff *skb);
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
struct scatterlist *sgout);
+struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);
struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
struct net_device *dev,
@@ -574,6 +647,6 @@ int tls_sw_fallback_init(struct sock *sk,
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
void tls_device_offload_cleanup_rx(struct sock *sk);
-void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn);
+void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
#endif /* _TLS_OFFLOAD_H */