author     Sujoy Ray <sujoy.ray@intel.com>    2022-05-04 21:01:19 +0300
committer  Sujoy Ray <sujoy.ray@intel.com>    2022-05-12 17:46:40 +0300
commit     efe6d9649b1d6b85b50cef64745df2e6749a8a45 (patch)
tree       e9aca55fa1fa29fea638ee52832fa9691fdd6f02 /drivers/nvme/host/tcp.c
parent     ab95859fee776e58934d2b0cc1f4e93810e66508 (diff)
parent     49caedb668e476c100d727f2174724e0610a2b92 (diff)
download   linux-efe6d9649b1d6b85b50cef64745df2e6749a8a45.tar.xz
Merge commit '49caedb668e476c100d727f2174724e0610a2b92' of https://github.com/openbmc/linux into openbmc/dev-5.15-intel-bump_v5.15.36
Signed-off-by: Sujoy Ray <sujoy.ray@intel.com>
Diffstat (limited to 'drivers/nvme/host/tcp.c')
-rw-r--r--  drivers/nvme/host/tcp.c  41
1 file changed, 41 insertions(+), 0 deletions(-)
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index efa9037da53c..10882d3d554c 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -30,6 +30,44 @@ static int so_priority;
 module_param(so_priority, int, 0644);
 MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/* lockdep can detect a circular dependency of the form
+ *   sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
+ * because dependencies are tracked for both nvme-tcp and user contexts. Using
+ * a separate class prevents lockdep from conflating nvme-tcp socket use with
+ * user-space socket API use.
+ */
+static struct lock_class_key nvme_tcp_sk_key[2];
+static struct lock_class_key nvme_tcp_slock_key[2];
+
+static void nvme_tcp_reclassify_socket(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+
+	if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
+		return;
+
+	switch (sk->sk_family) {
+	case AF_INET:
+		sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME",
+					      &nvme_tcp_slock_key[0],
+					      "sk_lock-AF_INET-NVME",
+					      &nvme_tcp_sk_key[0]);
+		break;
+	case AF_INET6:
+		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME",
+					      &nvme_tcp_slock_key[1],
+					      "sk_lock-AF_INET6-NVME",
+					      &nvme_tcp_sk_key[1]);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+	}
+}
+#else
+static void nvme_tcp_reclassify_socket(struct socket *sock) { }
+#endif
+
 enum nvme_tcp_send_state {
 	NVME_TCP_SEND_CMD_PDU = 0,
 	NVME_TCP_SEND_H2C_PDU,
@@ -1436,6 +1474,8 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
 		goto err_destroy_mutex;
 	}
 
+	nvme_tcp_reclassify_socket(queue->sock);
+
 	/* Single syn retry */
 	tcp_sock_set_syncnt(queue->sock->sk, 1);
 
@@ -2105,6 +2145,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
 	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
 
 	nvme_stop_keep_alive(ctrl);
+	flush_work(&ctrl->async_event_work);
 	nvme_tcp_teardown_io_queues(ctrl, false);
 	/* unquiesce to fail fast pending requests */
 	nvme_start_queues(ctrl);
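
The reclassification added in the first hunk only gives nvme-tcp's kernel-owned sockets their own lockdep class, so dependencies recorded while the driver holds sk_lock are not merged with those recorded for user-space sockets. Below is a minimal, hypothetical sketch (not part of this patch; the module, the demo_* names, and the single AF_INET class are assumptions) of how any in-kernel TCP user can apply the same pattern with sock_create_kern() and sock_lock_init_class_and_name():

/*
 * Hypothetical standalone module, illustrating the same pattern as the
 * nvme-tcp change above: a kernel user that creates its own TCP socket
 * gives its sk_lock/slock a dedicated lockdep class.
 */
#include <linux/module.h>
#include <linux/net.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key demo_sk_key;
static struct lock_class_key demo_slock_key;

static void demo_reclassify_socket(struct socket *sock)
{
	struct sock *sk = sock->sk;

	/* Only safe while the socket locks are unowned and not contended. */
	if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
		return;

	sock_lock_init_class_and_name(sk, "slock-AF_INET-DEMO",
				      &demo_slock_key,
				      "sk_lock-AF_INET-DEMO",
				      &demo_sk_key);
}
#else
static void demo_reclassify_socket(struct socket *sock) { }
#endif

static struct socket *demo_sock;

static int __init demo_init(void)
{
	int ret;

	/* Kernel-owned TCP socket, analogous to the nvme-tcp queue socket. */
	ret = sock_create_kern(&init_net, AF_INET, SOCK_STREAM,
			       IPPROTO_TCP, &demo_sock);
	if (ret)
		return ret;

	/* Reclassify right after creation, before the socket is used. */
	demo_reclassify_socket(demo_sock);
	return 0;
}

static void __exit demo_exit(void)
{
	sock_release(demo_sock);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

As in the patch, the reclassification has to happen immediately after the socket is created, while sock_allow_reclassification() still holds, i.e. before anything can own sk_lock or take the spinlock.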
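
The one-line flush_work() added to nvme_tcp_error_recovery_work() ensures that a queued or running async-event work item finishes before the I/O queues are torn down. A minimal, hypothetical sketch of that ordering follows (the demo_ctrl structure and names are assumptions, not nvme code, and new queueing is assumed to have stopped, as nvme_stop_keep_alive() does in the patch):

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_ctrl {
	struct work_struct async_work;
	void *resource;		/* state the async work dereferences */
};

static void demo_async_work(struct work_struct *work)
{
	struct demo_ctrl *ctrl =
		container_of(work, struct demo_ctrl, async_work);

	/* Touches ctrl->resource; must not run once teardown has started. */
	if (ctrl->resource)
		pr_info("async work saw live resource\n");
}

static void demo_error_recovery(struct demo_ctrl *ctrl)
{
	/*
	 * Wait for any queued or running async work before tearing down the
	 * state it uses; assumes nothing re-queues the work at this point.
	 */
	flush_work(&ctrl->async_work);

	kfree(ctrl->resource);
	ctrl->resource = NULL;
}

flush_work() returns only after the last queued instance of the work has completed, so once it returns the work can no longer observe the resources that teardown is about to release.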