summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--fs/smb/server/vfs_cache.c102
1 file changed, 76 insertions, 26 deletions
diff --git a/fs/smb/server/vfs_cache.c b/fs/smb/server/vfs_cache.c
index dc4037ef1834..354c4d8a1cfb 100644
--- a/fs/smb/server/vfs_cache.c
+++ b/fs/smb/server/vfs_cache.c
@@ -418,6 +418,14 @@ static void __ksmbd_remove_durable_fd(struct ksmbd_file *fp)
return;
idr_remove(global_ft.idr, fp->persistent_id);
+ /*
+ * Clear persistent_id so a later __ksmbd_close_fd() that runs from a
+ * delayed putter (e.g. when a concurrent ksmbd_lookup_fd_inode()
+ * walker held the final reference) does not re-issue idr_remove() on
+ * an id that idr_alloc_cyclic() may have already handed out to a new
+ * durable handle.
+ */
+ fp->persistent_id = KSMBD_NO_FID;
}
static void ksmbd_remove_durable_fd(struct ksmbd_file *fp)
@@ -521,6 +529,20 @@ static struct ksmbd_file *__ksmbd_lookup_fd(struct ksmbd_file_table *ft,
static void __put_fd_final(struct ksmbd_work *work, struct ksmbd_file *fp)
{
+ /*
+ * Detached durable fp -- session_fd_check() cleared fp->conn at
+ * preserve, so this fp is no longer tracked by any conn's
+ * stats.open_files_count. This happens when
+ * ksmbd_scavenger_dispose_dh() hands the final close off to an
+ * m_fp_list walker (e.g. ksmbd_lookup_fd_inode()) whose work->conn
+ * is unrelated to the conn that originally opened the handle; close
+ * via the NULL-ft path so we do not underflow that unrelated
+ * counter.
+ */
+ if (!fp->conn) {
+ __ksmbd_close_fd(NULL, fp);
+ return;
+ }
__ksmbd_close_fd(&work->sess->file_table, fp);
atomic_dec(&work->conn->stats.open_files_count);
}
@@ -1033,24 +1055,37 @@ static bool ksmbd_durable_scavenger_alive(void)
return true;
}
-static void ksmbd_scavenger_dispose_dh(struct list_head *head)
+static void ksmbd_scavenger_dispose_dh(struct ksmbd_file *fp)
{
- while (!list_empty(head)) {
- struct ksmbd_file *fp;
+ /*
+ * Durable-preserved fp can remain linked on f_ci->m_fp_list for
+ * share-mode checks. Unlink it before final close; fp->node is not
+ * available as a scavenger-private list node because re-adding it to
+ * another list corrupts m_fp_list.
+ */
+ down_write(&fp->f_ci->m_lock);
+ list_del_init(&fp->node);
+ up_write(&fp->f_ci->m_lock);
- fp = list_first_entry(head, struct ksmbd_file, node);
- list_del_init(&fp->node);
+ /*
+ * Drop both the durable lifetime reference and the transient reference
+ * taken by the scavenger under global_ft.lock. If a concurrent
+ * ksmbd_lookup_fd_inode() (or any other m_fp_list walker) snatched fp
+ * before the unlink above, that holder owns the final close via
+ * ksmbd_fd_put() -> __ksmbd_close_fd(). Otherwise the scavenger is
+ * the last putter and finalises fp here.
+ */
+ if (atomic_sub_and_test(2, &fp->refcount))
__ksmbd_close_fd(NULL, fp);
- }
}
static int ksmbd_durable_scavenger(void *dummy)
{
struct ksmbd_file *fp = NULL;
+ struct ksmbd_file *expired_fp;
unsigned int id;
unsigned int min_timeout = 1;
bool found_fp_timeout;
- LIST_HEAD(scavenger_list);
unsigned long remaining_jiffies;
__module_get(THIS_MODULE);
@@ -1060,8 +1095,6 @@ static int ksmbd_durable_scavenger(void *dummy)
if (try_to_freeze())
continue;
- found_fp_timeout = false;
-
remaining_jiffies = wait_event_timeout(dh_wq,
ksmbd_durable_scavenger_alive() == false,
__msecs_to_jiffies(min_timeout));
@@ -1070,23 +1103,39 @@ static int ksmbd_durable_scavenger(void *dummy)
else
min_timeout = DURABLE_HANDLE_MAX_TIMEOUT;
- write_lock(&global_ft.lock);
- idr_for_each_entry(global_ft.idr, fp, id) {
- if (!fp->durable_timeout)
- continue;
+ do {
+ expired_fp = NULL;
+ found_fp_timeout = false;
- if (atomic_read(&fp->refcount) > 1 ||
- fp->conn)
- continue;
-
- found_fp_timeout = true;
- if (fp->durable_scavenger_timeout <=
- jiffies_to_msecs(jiffies)) {
- __ksmbd_remove_durable_fd(fp);
- list_add(&fp->node, &scavenger_list);
- } else {
+ write_lock(&global_ft.lock);
+ idr_for_each_entry(global_ft.idr, fp, id) {
unsigned long durable_timeout;
+ if (!fp->durable_timeout)
+ continue;
+
+ if (atomic_read(&fp->refcount) > 1 ||
+ fp->conn)
+ continue;
+
+ found_fp_timeout = true;
+ if (fp->durable_scavenger_timeout <=
+ jiffies_to_msecs(jiffies)) {
+ __ksmbd_remove_durable_fd(fp);
+ /*
+ * Take a transient reference so fp
+ * cannot be freed by an in-flight
+ * ksmbd_lookup_fd_inode() that found
+ * it through f_ci->m_fp_list while we
+ * drop global_ft.lock and reach the
+ * m_fp_list unlink in
+ * ksmbd_scavenger_dispose_dh().
+ */
+ atomic_inc(&fp->refcount);
+ expired_fp = fp;
+ break;
+ }
+
durable_timeout =
fp->durable_scavenger_timeout -
jiffies_to_msecs(jiffies);
@@ -1094,10 +1143,11 @@ static int ksmbd_durable_scavenger(void *dummy)
if (min_timeout > durable_timeout)
min_timeout = durable_timeout;
}
- }
- write_unlock(&global_ft.lock);
+ write_unlock(&global_ft.lock);
- ksmbd_scavenger_dispose_dh(&scavenger_list);
+ if (expired_fp)
+ ksmbd_scavenger_dispose_dh(expired_fp);
+ } while (expired_fp);
if (found_fp_timeout == false)
break;