author     Jakub Kicinski <kuba@kernel.org>    2024-04-25 22:40:48 +0300
committer  Jakub Kicinski <kuba@kernel.org>    2024-04-25 22:41:37 +0300
commit     2bd87951de659df3381ce083342aaf5b1ea24689 (patch)
tree       e2635c49fddc29a53b8f537212dc6eb5b2013851 /fs
parent     ec00ed472bdb7d0af840da68c8c11bff9f4d9caa (diff)
parent     52afb15e9d9a021ab6eec923a087ec9f518cb713 (diff)
download   linux-2bd87951de659df3381ce083342aaf5b1ea24689.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Cross-merge networking fixes after downstream PR.
Conflicts:

drivers/net/ethernet/ti/icssg/icssg_prueth.c

net/mac80211/chan.c
  89884459a0b9 ("wifi: mac80211: fix idle calculation with multi-link")
  87f5500285fb ("wifi: mac80211: simplify ieee80211_assign_link_chanctx()")
https://lore.kernel.org/all/20240422105623.7b1fbda2@canb.auug.org.au/

net/unix/garbage.c
  1971d13ffa84 ("af_unix: Suppress false-positive lockdep splat for spin_lock() in __unix_gc().")
  4090fa373f0e ("af_unix: Replace garbage collection algorithm.")

drivers/net/ethernet/ti/icssg/icssg_prueth.c
drivers/net/ethernet/ti/icssg/icssg_common.c
  4dcd0e83ea1d ("net: ti: icssg-prueth: Fix signedness bug in prueth_init_rx_chns()")
  e2dc7bfd677f ("net: ti: icssg-prueth: Move common functions into a separate file")
No adjacent changes.
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'fs')
58 files changed, 442 insertions, 190 deletions
diff --git a/fs/9p/fid.h b/fs/9p/fid.h index 29281b7c3887..0d6138bee2a3 100644 --- a/fs/9p/fid.h +++ b/fs/9p/fid.h @@ -49,9 +49,6 @@ static inline struct p9_fid *v9fs_fid_clone(struct dentry *dentry) static inline void v9fs_fid_add_modes(struct p9_fid *fid, unsigned int s_flags, unsigned int s_cache, unsigned int f_flags) { - if (fid->qid.type != P9_QTFILE) - return; - if ((!s_cache) || ((fid->qid.version == 0) && !(s_flags & V9FS_IGNORE_QV)) || (s_flags & V9FS_DIRECT_IO) || (f_flags & O_DIRECT)) { diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index abdbbaee5184..348cc90bf9c5 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c @@ -520,6 +520,7 @@ const struct file_operations v9fs_file_operations = { .splice_read = v9fs_file_splice_read, .splice_write = iter_file_splice_write, .fsync = v9fs_file_fsync, + .setlease = simple_nosetlease, }; const struct file_operations v9fs_file_operations_dotl = { @@ -534,4 +535,5 @@ const struct file_operations v9fs_file_operations_dotl = { .splice_read = v9fs_file_splice_read, .splice_write = iter_file_splice_write, .fsync = v9fs_file_fsync_dotl, + .setlease = simple_nosetlease, }; diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index b01b1bbf2493..47bd77199e20 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -83,7 +83,7 @@ static int p9mode2perm(struct v9fs_session_info *v9ses, int res; int mode = stat->mode; - res = mode & S_IALLUGO; + res = mode & 0777; /* S_IRWXUGO */ if (v9fs_proto_dotu(v9ses)) { if ((mode & P9_DMSETUID) == P9_DMSETUID) res |= S_ISUID; @@ -178,6 +178,9 @@ int v9fs_uflags2omode(int uflags, int extended) break; } + if (uflags & O_TRUNC) + ret |= P9_OTRUNC; + if (extended) { if (uflags & O_EXCL) ret |= P9_OEXCL; @@ -1061,8 +1064,6 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode, struct v9fs_session_info *v9ses = sb->s_fs_info; struct v9fs_inode *v9inode = V9FS_I(inode); - set_nlink(inode, 1); - inode_set_atime(inode, stat->atime, 0); inode_set_mtime(inode, stat->mtime, 0); inode_set_ctime(inode, stat->mtime, 0); diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c index 4236058c7bbd..55e67e36ae68 100644 --- a/fs/9p/vfs_super.c +++ b/fs/9p/vfs_super.c @@ -244,6 +244,21 @@ done: return res; } +static int v9fs_drop_inode(struct inode *inode) +{ + struct v9fs_session_info *v9ses; + + v9ses = v9fs_inode2v9ses(inode); + if (v9ses->cache & (CACHE_META|CACHE_LOOSE)) + return generic_drop_inode(inode); + /* + * in case of non cached mode always drop the + * inode because we want the inode attribute + * to always match that on the server. 
+ */ + return 1; +} + static int v9fs_write_inode(struct inode *inode, struct writeback_control *wbc) { @@ -268,6 +283,7 @@ static const struct super_operations v9fs_super_ops = { .alloc_inode = v9fs_alloc_inode, .free_inode = v9fs_free_inode, .statfs = simple_statfs, + .drop_inode = v9fs_drop_inode, .evict_inode = v9fs_evict_inode, .show_options = v9fs_show_options, .umount_begin = v9fs_umount_begin, @@ -278,6 +294,7 @@ static const struct super_operations v9fs_super_ops_dotl = { .alloc_inode = v9fs_alloc_inode, .free_inode = v9fs_free_inode, .statfs = v9fs_statfs, + .drop_inode = v9fs_drop_inode, .evict_inode = v9fs_evict_inode, .show_options = v9fs_show_options, .umount_begin = v9fs_umount_begin, diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c index fadb1078903d..a20044201002 100644 --- a/fs/bcachefs/backpointers.c +++ b/fs/bcachefs/backpointers.c @@ -470,7 +470,7 @@ found: goto err; } - bio = bio_alloc(ca->disk_sb.bdev, 1, REQ_OP_READ, GFP_KERNEL); + bio = bio_alloc(ca->disk_sb.bdev, buf_pages(data_buf, bytes), REQ_OP_READ, GFP_KERNEL); bio->bi_iter.bi_sector = p.ptr.offset; bch2_bio_map(bio, data_buf, bytes); ret = submit_bio_wait(bio); diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h index 085987435a5e..f7fbfccd2b1e 100644 --- a/fs/bcachefs/bcachefs_format.h +++ b/fs/bcachefs/bcachefs_format.h @@ -1504,7 +1504,8 @@ enum btree_id_flags { BIT_ULL(KEY_TYPE_stripe)) \ x(reflink, 7, BTREE_ID_EXTENTS|BTREE_ID_DATA, \ BIT_ULL(KEY_TYPE_reflink_v)| \ - BIT_ULL(KEY_TYPE_indirect_inline_data)) \ + BIT_ULL(KEY_TYPE_indirect_inline_data)| \ + BIT_ULL(KEY_TYPE_error)) \ x(subvolumes, 8, 0, \ BIT_ULL(KEY_TYPE_subvolume)) \ x(snapshots, 9, 0, \ diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c index ecbd9598f69f..791470b0c654 100644 --- a/fs/bcachefs/btree_gc.c +++ b/fs/bcachefs/btree_gc.c @@ -1587,7 +1587,7 @@ static int bch2_gc_write_reflink_key(struct btree_trans *trans, struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k); ret = PTR_ERR_OR_ZERO(new); if (ret) - return ret; + goto out; if (!r->refcount) new->k.type = KEY_TYPE_deleted; @@ -1595,6 +1595,7 @@ static int bch2_gc_write_reflink_key(struct btree_trans *trans, *bkey_refcount(bkey_i_to_s(new)) = cpu_to_le64(r->refcount); ret = bch2_trans_update(trans, iter, new, 0); } +out: fsck_err: printbuf_exit(&buf); return ret; diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c index 9678b2375bed..debb0edc3455 100644 --- a/fs/bcachefs/btree_io.c +++ b/fs/bcachefs/btree_io.c @@ -888,7 +888,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b, -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i, btree_node_bkey_bad_u64s, - "bad k->u64s %u (min %u max %lu)", k->u64s, + "bad k->u64s %u (min %u max %zu)", k->u64s, bkeyp_key_u64s(&b->format, k), U8_MAX - BKEY_U64s + bkeyp_key_u64s(&b->format, k))) goto drop_this_key; diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c index 88a3582a3275..e8c1c530cd95 100644 --- a/fs/bcachefs/btree_key_cache.c +++ b/fs/bcachefs/btree_key_cache.c @@ -842,8 +842,6 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink, * Newest freed entries are at the end of the list - once we hit one * that's too new to be freed, we can bail out: */ - scanned += bc->nr_freed_nonpcpu; - list_for_each_entry_safe(ck, t, &bc->freed_nonpcpu, list) { if (!poll_state_synchronize_srcu(&c->btree_trans_barrier, ck->btree_trans_barrier_seq)) @@ -857,11 +855,6 @@ static unsigned long bch2_btree_key_cache_scan(struct 
shrinker *shrink, bc->nr_freed_nonpcpu--; } - if (scanned >= nr) - goto out; - - scanned += bc->nr_freed_pcpu; - list_for_each_entry_safe(ck, t, &bc->freed_pcpu, list) { if (!poll_state_synchronize_srcu(&c->btree_trans_barrier, ck->btree_trans_barrier_seq)) @@ -875,9 +868,6 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink, bc->nr_freed_pcpu--; } - if (scanned >= nr) - goto out; - rcu_read_lock(); tbl = rht_dereference_rcu(bc->table.tbl, &bc->table); if (bc->shrink_iter >= tbl->size) @@ -893,12 +883,12 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink, next = rht_dereference_bucket_rcu(pos->next, tbl, bc->shrink_iter); ck = container_of(pos, struct bkey_cached, hash); - if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) + if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { goto next; - - if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags)) + } else if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags)) { clear_bit(BKEY_CACHED_ACCESSED, &ck->flags); - else if (bkey_cached_lock_for_evict(ck)) { + goto next; + } else if (bkey_cached_lock_for_evict(ck)) { bkey_cached_evict(bc, ck); bkey_cached_free(bc, ck); } @@ -916,7 +906,6 @@ next: } while (scanned < nr && bc->shrink_iter != start); rcu_read_unlock(); -out: memalloc_nofs_restore(flags); srcu_read_unlock(&c->btree_trans_barrier, srcu_idx); mutex_unlock(&bc->lock); diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c index 866bd278439f..c60794264da2 100644 --- a/fs/bcachefs/btree_node_scan.c +++ b/fs/bcachefs/btree_node_scan.c @@ -302,6 +302,8 @@ again: start->max_key = bpos_predecessor(n->min_key); start->range_updated = true; + } else if (n->level) { + n->overwritten = true; } else { struct printbuf buf = PRINTBUF; diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h index e0c982a4195c..c69b233c41bb 100644 --- a/fs/bcachefs/btree_types.h +++ b/fs/bcachefs/btree_types.h @@ -321,9 +321,9 @@ struct bkey_cached { struct btree_bkey_cached_common c; unsigned long flags; + unsigned long btree_trans_barrier_seq; u16 u64s; bool valid; - u32 btree_trans_barrier_seq; struct bkey_cached_key key; struct rhash_head hash; diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c index 6030c396754f..b4efd8cc4d1a 100644 --- a/fs/bcachefs/btree_update_interior.c +++ b/fs/bcachefs/btree_update_interior.c @@ -1960,7 +1960,11 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans, if ((flags & BCH_WATERMARK_MASK) == BCH_WATERMARK_interior_updates) return 0; - flags &= ~BCH_WATERMARK_MASK; + if ((flags & BCH_WATERMARK_MASK) <= BCH_WATERMARK_reclaim) { + flags &= ~BCH_WATERMARK_MASK; + flags |= BCH_WATERMARK_btree; + flags |= BCH_TRANS_COMMIT_journal_reclaim; + } b = trans->paths[path].l[level].b; diff --git a/fs/bcachefs/chardev.c b/fs/bcachefs/chardev.c index 72781aad6ba7..4d14f19f5185 100644 --- a/fs/bcachefs/chardev.c +++ b/fs/bcachefs/chardev.c @@ -232,13 +232,15 @@ static long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_a /* We need request_key() to be called before we punt to kthread: */ opt_set(thr->opts, nostart, true); + bch2_thread_with_stdio_init(&thr->thr, &bch2_offline_fsck_ops); + thr->c = bch2_fs_open(devs.data, arg.nr_devs, thr->opts); if (!IS_ERR(thr->c) && thr->c->opts.errors == BCH_ON_ERROR_panic) thr->c->opts.errors = BCH_ON_ERROR_ro; - ret = bch2_run_thread_with_stdio(&thr->thr, &bch2_offline_fsck_ops); + ret = __bch2_run_thread_with_stdio(&thr->thr); out: darray_for_each(devs, i) kfree(*i); diff --git a/fs/bcachefs/fs.c 
b/fs/bcachefs/fs.c index b5ea9fa1259d..fce690007edf 100644 --- a/fs/bcachefs/fs.c +++ b/fs/bcachefs/fs.c @@ -188,7 +188,8 @@ static struct bch_inode_info *bch2_inode_insert(struct bch_fs *c, struct bch_ino BUG_ON(!old); if (unlikely(old != inode)) { - discard_new_inode(&inode->v); + __destroy_inode(&inode->v); + kmem_cache_free(bch2_inode_cache, inode); inode = old; } else { mutex_lock(&c->vfs_inodes_lock); @@ -225,8 +226,10 @@ static struct bch_inode_info *bch2_new_inode(struct btree_trans *trans) if (unlikely(!inode)) { int ret = drop_locks_do(trans, (inode = to_bch_ei(new_inode(c->vfs_sb))) ? 0 : -ENOMEM); - if (ret && inode) - discard_new_inode(&inode->v); + if (ret && inode) { + __destroy_inode(&inode->v); + kmem_cache_free(bch2_inode_cache, inode); + } if (ret) return ERR_PTR(ret); } diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c index 9aa28b52ab92..eb1f9d6f5a19 100644 --- a/fs/bcachefs/journal_io.c +++ b/fs/bcachefs/journal_io.c @@ -1723,7 +1723,7 @@ static void journal_write_endio(struct bio *bio) percpu_ref_put(&ca->io_ref); } -static CLOSURE_CALLBACK(do_journal_write) +static CLOSURE_CALLBACK(journal_write_submit) { closure_type(w, struct journal_buf, io); struct journal *j = container_of(w, struct journal, buf[w->idx]); @@ -1768,6 +1768,44 @@ static CLOSURE_CALLBACK(do_journal_write) continue_at(cl, journal_write_done, j->wq); } +static CLOSURE_CALLBACK(journal_write_preflush) +{ + closure_type(w, struct journal_buf, io); + struct journal *j = container_of(w, struct journal, buf[w->idx]); + struct bch_fs *c = container_of(j, struct bch_fs, journal); + + if (j->seq_ondisk + 1 != le64_to_cpu(w->data->seq)) { + spin_lock(&j->lock); + closure_wait(&j->async_wait, cl); + spin_unlock(&j->lock); + + continue_at(cl, journal_write_preflush, j->wq); + return; + } + + if (w->separate_flush) { + for_each_rw_member(c, ca) { + percpu_ref_get(&ca->io_ref); + + struct journal_device *ja = &ca->journal; + struct bio *bio = &ja->bio[w->idx]->bio; + bio_reset(bio, ca->disk_sb.bdev, + REQ_OP_WRITE|REQ_SYNC|REQ_META|REQ_PREFLUSH); + bio->bi_end_io = journal_write_endio; + bio->bi_private = ca; + closure_bio_submit(bio, cl); + } + + continue_at(cl, journal_write_submit, j->wq); + } else { + /* + * no need to punt to another work item if we're not waiting on + * preflushes + */ + journal_write_submit(&cl->work); + } +} + static int bch2_journal_write_prep(struct journal *j, struct journal_buf *w) { struct bch_fs *c = container_of(j, struct bch_fs, journal); @@ -2033,23 +2071,9 @@ CLOSURE_CALLBACK(bch2_journal_write) goto err; if (!JSET_NO_FLUSH(w->data)) - closure_wait_event(&j->async_wait, j->seq_ondisk + 1 == le64_to_cpu(w->data->seq)); - - if (!JSET_NO_FLUSH(w->data) && w->separate_flush) { - for_each_rw_member(c, ca) { - percpu_ref_get(&ca->io_ref); - - struct journal_device *ja = &ca->journal; - struct bio *bio = &ja->bio[w->idx]->bio; - bio_reset(bio, ca->disk_sb.bdev, - REQ_OP_WRITE|REQ_SYNC|REQ_META|REQ_PREFLUSH); - bio->bi_end_io = journal_write_endio; - bio->bi_private = ca; - closure_bio_submit(bio, cl); - } - } - - continue_at(cl, do_journal_write, j->wq); + continue_at(cl, journal_write_preflush, j->wq); + else + continue_at(cl, journal_write_submit, j->wq); return; no_io: continue_at(cl, journal_write_done, j->wq); diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c index 0f328aba9760..be5b47619327 100644 --- a/fs/bcachefs/recovery.c +++ b/fs/bcachefs/recovery.c @@ -249,7 +249,10 @@ int bch2_journal_replay(struct bch_fs *c) struct journal_key *k = *kp; - 
replay_now_at(j, k->journal_seq); + if (k->journal_seq) + replay_now_at(j, k->journal_seq); + else + replay_now_at(j, j->replay_journal_seq_end); ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc| diff --git a/fs/bcachefs/sb-clean.c b/fs/bcachefs/sb-clean.c index 5980ba2563fe..35ca3f138de6 100644 --- a/fs/bcachefs/sb-clean.c +++ b/fs/bcachefs/sb-clean.c @@ -29,6 +29,14 @@ int bch2_sb_clean_validate_late(struct bch_fs *c, struct bch_sb_field_clean *cle for (entry = clean->start; entry < (struct jset_entry *) vstruct_end(&clean->field); entry = vstruct_next(entry)) { + if (vstruct_end(entry) > vstruct_end(&clean->field)) { + bch_err(c, "journal entry (u64s %u) overran end of superblock clean section (u64s %u) by %zu", + le16_to_cpu(entry->u64s), le32_to_cpu(clean->field.u64s), + (u64 *) vstruct_end(entry) - (u64 *) vstruct_end(&clean->field)); + bch2_sb_error_count(c, BCH_FSCK_ERR_sb_clean_entry_overrun); + return -BCH_ERR_fsck_repair_unimplemented; + } + ret = bch2_journal_entry_validate(c, NULL, entry, le16_to_cpu(c->disk_sb.sb->version), BCH_SB_BIG_ENDIAN(c->disk_sb.sb), diff --git a/fs/bcachefs/sb-errors_types.h b/fs/bcachefs/sb-errors_types.h index 4ca6e7b0d8aa..06c7a644f4a4 100644 --- a/fs/bcachefs/sb-errors_types.h +++ b/fs/bcachefs/sb-errors_types.h @@ -271,7 +271,8 @@ x(btree_root_unreadable_and_scan_found_nothing, 263) \ x(snapshot_node_missing, 264) \ x(dup_backpointer_to_bad_csum_extent, 265) \ - x(btree_bitmap_not_marked, 266) + x(btree_bitmap_not_marked, 266) \ + x(sb_clean_entry_overrun, 267) enum bch_sb_error_id { #define x(t, n) BCH_FSCK_ERR_##t = n, diff --git a/fs/bcachefs/sb-members.c b/fs/bcachefs/sb-members.c index 522a969345e5..5b8e621ac5eb 100644 --- a/fs/bcachefs/sb-members.c +++ b/fs/bcachefs/sb-members.c @@ -463,8 +463,8 @@ static void __bch2_dev_btree_bitmap_mark(struct bch_sb_field_members_v2 *mi, uns m->btree_bitmap_shift += resize; } - for (unsigned bit = sectors >> m->btree_bitmap_shift; - bit << m->btree_bitmap_shift < end; + for (unsigned bit = start >> m->btree_bitmap_shift; + (u64) bit << m->btree_bitmap_shift < end; bit++) bitmap |= BIT_ULL(bit); diff --git a/fs/bcachefs/sb-members.h b/fs/bcachefs/sb-members.h index b27c3e4467cf..5efa64eca5f8 100644 --- a/fs/bcachefs/sb-members.h +++ b/fs/bcachefs/sb-members.h @@ -235,11 +235,11 @@ static inline bool bch2_dev_btree_bitmap_marked_sectors(struct bch_dev *ca, u64 { u64 end = start + sectors; - if (end > 64 << ca->mi.btree_bitmap_shift) + if (end > 64ULL << ca->mi.btree_bitmap_shift) return false; - for (unsigned bit = sectors >> ca->mi.btree_bitmap_shift; - bit << ca->mi.btree_bitmap_shift < end; + for (unsigned bit = start >> ca->mi.btree_bitmap_shift; + (u64) bit << ca->mi.btree_bitmap_shift < end; bit++) if (!(ca->mi.btree_allocated_bitmap & BIT_ULL(bit))) return false; diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c index 8daf80a38d60..88e214c609bb 100644 --- a/fs/bcachefs/super.c +++ b/fs/bcachefs/super.c @@ -544,6 +544,7 @@ static void __bch2_fs_free(struct bch_fs *c) bch2_find_btree_nodes_exit(&c->found_btree_nodes); bch2_free_pending_node_rewrites(c); + bch2_fs_allocator_background_exit(c); bch2_fs_sb_errors_exit(c); bch2_fs_counters_exit(c); bch2_fs_snapshots_exit(c); diff --git a/fs/bcachefs/thread_with_file.c b/fs/bcachefs/thread_with_file.c index 940db15d6a93..b1af7ac430f6 100644 --- a/fs/bcachefs/thread_with_file.c +++ b/fs/bcachefs/thread_with_file.c @@ -294,16 +294,27 @@ static int thread_with_stdio_fn(void *arg) return 0; } -int bch2_run_thread_with_stdio(struct 
thread_with_stdio *thr, - const struct thread_with_stdio_ops *ops) +void bch2_thread_with_stdio_init(struct thread_with_stdio *thr, + const struct thread_with_stdio_ops *ops) { stdio_buf_init(&thr->stdio.input); stdio_buf_init(&thr->stdio.output); thr->ops = ops; +} +int __bch2_run_thread_with_stdio(struct thread_with_stdio *thr) +{ return bch2_run_thread_with_file(&thr->thr, &thread_with_stdio_fops, thread_with_stdio_fn); } +int bch2_run_thread_with_stdio(struct thread_with_stdio *thr, + const struct thread_with_stdio_ops *ops) +{ + bch2_thread_with_stdio_init(thr, ops); + + return __bch2_run_thread_with_stdio(thr); +} + int bch2_run_thread_with_stdout(struct thread_with_stdio *thr, const struct thread_with_stdio_ops *ops) { diff --git a/fs/bcachefs/thread_with_file.h b/fs/bcachefs/thread_with_file.h index af54ea8f5b0f..1d63d14d7dca 100644 --- a/fs/bcachefs/thread_with_file.h +++ b/fs/bcachefs/thread_with_file.h @@ -63,6 +63,9 @@ struct thread_with_stdio { const struct thread_with_stdio_ops *ops; }; +void bch2_thread_with_stdio_init(struct thread_with_stdio *, + const struct thread_with_stdio_ops *); +int __bch2_run_thread_with_stdio(struct thread_with_stdio *); int bch2_run_thread_with_stdio(struct thread_with_stdio *, const struct thread_with_stdio_ops *); int bch2_run_thread_with_stdout(struct thread_with_stdio *, diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index c1e6a5bbeeaf..58110c968667 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -2776,20 +2776,14 @@ struct btrfs_data_container *init_data_container(u32 total_bytes) size_t alloc_bytes; alloc_bytes = max_t(size_t, total_bytes, sizeof(*data)); - data = kvmalloc(alloc_bytes, GFP_KERNEL); + data = kvzalloc(alloc_bytes, GFP_KERNEL); if (!data) return ERR_PTR(-ENOMEM); - if (total_bytes >= sizeof(*data)) { + if (total_bytes >= sizeof(*data)) data->bytes_left = total_bytes - sizeof(*data); - data->bytes_missing = 0; - } else { + else data->bytes_missing = sizeof(*data) - total_bytes; - data->bytes_left = 0; - } - - data->elem_cnt = 0; - data->elem_missed = 0; return data; } diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 445f7716f1e2..24a048210b15 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -817,7 +817,7 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end, split->block_len = em->block_len; split->orig_start = em->orig_start; } else { - const u64 diff = start + len - em->start; + const u64 diff = end - em->start; split->block_len = split->len; split->block_start += diff; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index c65fe5de4022..7fed887e700c 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1145,13 +1145,13 @@ static void submit_one_async_extent(struct async_chunk *async_chunk, 0, *alloc_hint, &ins, 1, 1); if (ret) { /* - * Here we used to try again by going back to non-compressed - * path for ENOSPC. But we can't reserve space even for - * compressed size, how could it work for uncompressed size - * which requires larger size? So here we directly go error - * path. + * We can't reserve contiguous space for the compressed size. + * Unlikely, but it's possible that we could have enough + * non-contiguous space for the uncompressed size instead. So + * fall back to uncompressed. 
*/ - goto out_free; + submit_uncompressed_range(inode, async_extent, locked_page); + goto done; } /* Here we're doing allocation and writeback of the compressed pages */ @@ -1203,7 +1203,6 @@ done: out_free_reserve: btrfs_dec_block_group_reservations(fs_info, ins.objectid); btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); -out_free: mapping_set_error(inode->vfs_inode.i_mapping, -EIO); extent_clear_unlock_delalloc(inode, start, end, NULL, EXTENT_LOCKED | EXTENT_DELALLOC | diff --git a/fs/btrfs/messages.c b/fs/btrfs/messages.c index c96dd66fd0f7..210d9c82e2ae 100644 --- a/fs/btrfs/messages.c +++ b/fs/btrfs/messages.c @@ -7,7 +7,7 @@ #ifdef CONFIG_PRINTK -#define STATE_STRING_PREFACE ": state " +#define STATE_STRING_PREFACE " state " #define STATE_STRING_BUF_LEN (sizeof(STATE_STRING_PREFACE) + BTRFS_FS_STATE_COUNT + 1) /* diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index fa25004ab04e..4b22cfe9a98c 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -1012,6 +1012,7 @@ static void scrub_stripe_read_repair_worker(struct work_struct *work) struct btrfs_fs_info *fs_info = sctx->fs_info; int num_copies = btrfs_num_copies(fs_info, stripe->bg->start, stripe->bg->length); + unsigned long repaired; int mirror; int i; @@ -1078,16 +1079,15 @@ out: * Submit the repaired sectors. For zoned case, we cannot do repair * in-place, but queue the bg to be relocated. */ - if (btrfs_is_zoned(fs_info)) { - if (!bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors)) + bitmap_andnot(&repaired, &stripe->init_error_bitmap, &stripe->error_bitmap, + stripe->nr_sectors); + if (!sctx->readonly && !bitmap_empty(&repaired, stripe->nr_sectors)) { + if (btrfs_is_zoned(fs_info)) { btrfs_repair_one_zone(fs_info, sctx->stripes[0].bg->start); - } else if (!sctx->readonly) { - unsigned long repaired; - - bitmap_andnot(&repaired, &stripe->init_error_bitmap, - &stripe->error_bitmap, stripe->nr_sectors); - scrub_write_sectors(sctx, stripe, repaired, false); - wait_scrub_stripe_io(stripe); + } else { + scrub_write_sectors(sctx, stripe, repaired, false); + wait_scrub_stripe_io(stripe); + } } scrub_stripe_report_errors(sctx, stripe); diff --git a/fs/btrfs/tests/extent-map-tests.c b/fs/btrfs/tests/extent-map-tests.c index 253cce7ffecf..47b5d301038e 100644 --- a/fs/btrfs/tests/extent-map-tests.c +++ b/fs/btrfs/tests/extent-map-tests.c @@ -847,6 +847,11 @@ static int test_case_7(struct btrfs_fs_info *fs_info) goto out; } + if (em->block_start != SZ_32K + SZ_4K) { + test_err("em->block_start is %llu, expected 36K", em->block_start); + goto out; + } + free_extent_map(em); read_lock(&em_tree->lock); diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c index b6cad106c37e..0b2da7b7e2ad 100644 --- a/fs/fuse/cuse.c +++ b/fs/fuse/cuse.c @@ -310,6 +310,10 @@ struct cuse_init_args { /** * cuse_process_init_reply - finish initializing CUSE channel * + * @fm: The fuse mount information containing the CUSE connection. + * @args: The arguments passed to the init reply. + * @error: The error code signifying if any error occurred during the process. + * * This function creates the character device and sets up all the * required data structures for it. Please read the comment at the * top of this file for high level overview. 
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 4a6df591add6..2b0d4781f394 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1321,6 +1321,7 @@ retry: err = fuse_do_statx(inode, file, stat); if (err == -ENOSYS) { fc->no_statx = 1; + err = 0; goto retry; } } else { diff --git a/fs/fuse/file.c b/fs/fuse/file.c index a56e7bffd000..b57ce4157640 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1362,7 +1362,7 @@ static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from, bool *exclusive) { struct inode *inode = file_inode(iocb->ki_filp); - struct fuse_file *ff = iocb->ki_filp->private_data; + struct fuse_inode *fi = get_fuse_inode(inode); *exclusive = fuse_dio_wr_exclusive_lock(iocb, from); if (*exclusive) { @@ -1377,7 +1377,7 @@ static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from, * have raced, so check it again. */ if (fuse_io_past_eof(iocb, from) || - fuse_file_uncached_io_start(inode, ff, NULL) != 0) { + fuse_inode_uncached_io_start(fi, NULL) != 0) { inode_unlock_shared(inode); inode_lock(inode); *exclusive = true; @@ -1388,13 +1388,13 @@ static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from, static void fuse_dio_unlock(struct kiocb *iocb, bool exclusive) { struct inode *inode = file_inode(iocb->ki_filp); - struct fuse_file *ff = iocb->ki_filp->private_data; + struct fuse_inode *fi = get_fuse_inode(inode); if (exclusive) { inode_unlock(inode); } else { /* Allow opens in caching mode after last parallel dio end */ - fuse_file_uncached_io_end(inode, ff); + fuse_inode_uncached_io_end(fi); inode_unlock_shared(inode); } } @@ -2574,8 +2574,10 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma) * First mmap of direct_io file enters caching inode io mode. * Also waits for parallel dio writers to go into serial mode * (exclusive instead of shared lock). + * After first mmap, the inode stays in caching io mode until + * the direct_io file release. */ - rc = fuse_file_cached_io_start(inode, ff); + rc = fuse_file_cached_io_open(inode, ff); if (rc) return rc; } diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index b24084b60864..f23919610313 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -1394,9 +1394,10 @@ int fuse_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry, struct fileattr *fa); /* iomode.c */ -int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff); -int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff, struct fuse_backing *fb); -void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff); +int fuse_file_cached_io_open(struct inode *inode, struct fuse_file *ff); +int fuse_inode_uncached_io_start(struct fuse_inode *fi, + struct fuse_backing *fb); +void fuse_inode_uncached_io_end(struct fuse_inode *fi); int fuse_file_io_open(struct file *file, struct inode *inode); void fuse_file_io_release(struct fuse_file *ff, struct inode *inode); diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 3a5d88878335..99e44ea7d875 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -175,6 +175,7 @@ static void fuse_evict_inode(struct inode *inode) } } if (S_ISREG(inode->i_mode) && !fuse_is_bad(inode)) { + WARN_ON(fi->iocachectr != 0); WARN_ON(!list_empty(&fi->write_files)); WARN_ON(!list_empty(&fi->queued_writes)); } diff --git a/fs/fuse/iomode.c b/fs/fuse/iomode.c index c653ddcf0578..c99e285f3183 100644 --- a/fs/fuse/iomode.c +++ b/fs/fuse/iomode.c @@ -21,12 +21,13 @@ static inline bool fuse_is_io_cache_wait(struct fuse_inode *fi) } /* - * Start cached io mode. 
+ * Called on cached file open() and on first mmap() of direct_io file. + * Takes cached_io inode mode reference to be dropped on file release. * * Blocks new parallel dio writes and waits for the in-progress parallel dio * writes to complete. */ -int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff) +int fuse_file_cached_io_open(struct inode *inode, struct fuse_file *ff) { struct fuse_inode *fi = get_fuse_inode(inode); @@ -67,10 +68,9 @@ int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff) return 0; } -static void fuse_file_cached_io_end(struct inode *inode, struct fuse_file *ff) +static void fuse_file_cached_io_release(struct fuse_file *ff, + struct fuse_inode *fi) { - struct fuse_inode *fi = get_fuse_inode(inode); - spin_lock(&fi->lock); WARN_ON(fi->iocachectr <= 0); WARN_ON(ff->iomode != IOM_CACHED); @@ -82,16 +82,15 @@ static void fuse_file_cached_io_end(struct inode *inode, struct fuse_file *ff) } /* Start strictly uncached io mode where cache access is not allowed */ -int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff, struct fuse_backing *fb) +int fuse_inode_uncached_io_start(struct fuse_inode *fi, struct fuse_backing *fb) { - struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_backing *oldfb; int err = 0; spin_lock(&fi->lock); /* deny conflicting backing files on same fuse inode */ oldfb = fuse_inode_backing(fi); - if (oldfb && oldfb != fb) { + if (fb && oldfb && oldfb != fb) { err = -EBUSY; goto unlock; } @@ -99,12 +98,10 @@ int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff, struc err = -ETXTBSY; goto unlock; } - WARN_ON(ff->iomode != IOM_NONE); fi->iocachectr--; - ff->iomode = IOM_UNCACHED; /* fuse inode holds a single refcount of backing file */ - if (!oldfb) { + if (fb && !oldfb) { oldfb = fuse_inode_backing_set(fi, fb); WARN_ON_ONCE(oldfb != NULL); } else { @@ -115,15 +112,29 @@ unlock: return err; } -void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff) +/* Takes uncached_io inode mode reference to be dropped on file release */ +static int fuse_file_uncached_io_open(struct inode *inode, + struct fuse_file *ff, + struct fuse_backing *fb) { struct fuse_inode *fi = get_fuse_inode(inode); + int err; + + err = fuse_inode_uncached_io_start(fi, fb); + if (err) + return err; + + WARN_ON(ff->iomode != IOM_NONE); + ff->iomode = IOM_UNCACHED; + return 0; +} + +void fuse_inode_uncached_io_end(struct fuse_inode *fi) +{ struct fuse_backing *oldfb = NULL; spin_lock(&fi->lock); WARN_ON(fi->iocachectr >= 0); - WARN_ON(ff->iomode != IOM_UNCACHED); - ff->iomode = IOM_NONE; fi->iocachectr++; if (!fi->iocachectr) { wake_up(&fi->direct_io_waitq); @@ -134,6 +145,15 @@ void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff) fuse_backing_put(oldfb); } +/* Drop uncached_io reference from passthrough open */ +static void fuse_file_uncached_io_release(struct fuse_file *ff, + struct fuse_inode *fi) +{ + WARN_ON(ff->iomode != IOM_UNCACHED); + ff->iomode = IOM_NONE; + fuse_inode_uncached_io_end(fi); +} + /* * Open flags that are allowed in combination with FOPEN_PASSTHROUGH. 
* A combination of FOPEN_PASSTHROUGH and FOPEN_DIRECT_IO means that read/write @@ -163,7 +183,7 @@ static int fuse_file_passthrough_open(struct inode *inode, struct file *file) return PTR_ERR(fb); /* First passthrough file open denies caching inode io mode */ - err = fuse_file_uncached_io_start(inode, ff, fb); + err = fuse_file_uncached_io_open(inode, ff, fb); if (!err) return 0; @@ -216,7 +236,7 @@ int fuse_file_io_open(struct file *file, struct inode *inode) if (ff->open_flags & FOPEN_PASSTHROUGH) err = fuse_file_passthrough_open(inode, file); else - err = fuse_file_cached_io_start(inode, ff); + err = fuse_file_cached_io_open(inode, ff); if (err) goto fail; @@ -236,8 +256,10 @@ fail: /* No more pending io and no new io possible to inode via open/mmapped file */ void fuse_file_io_release(struct fuse_file *ff, struct inode *inode) { + struct fuse_inode *fi = get_fuse_inode(inode); + /* - * Last parallel dio close allows caching inode io mode. + * Last passthrough file close allows caching inode io mode. * Last caching file close exits caching inode io mode. */ switch (ff->iomode) { @@ -245,10 +267,10 @@ void fuse_file_io_release(struct fuse_file *ff, struct inode *inode) /* Nothing to do */ break; case IOM_UNCACHED: - fuse_file_uncached_io_end(inode, ff); + fuse_file_uncached_io_release(ff, fi); break; case IOM_CACHED: - fuse_file_cached_io_end(inode, ff); + fuse_file_cached_io_release(ff, fi); break; } } diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 87c9547989f6..e88aca0c6e8e 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -983,15 +983,7 @@ static struct workqueue_struct *callback_wq; static bool nfsd4_queue_cb(struct nfsd4_callback *cb) { trace_nfsd_cb_queue(cb->cb_clp, cb); - return queue_delayed_work(callback_wq, &cb->cb_work, 0); -} - -static void nfsd4_queue_cb_delayed(struct nfsd4_callback *cb, - unsigned long msecs) -{ - trace_nfsd_cb_queue(cb->cb_clp, cb); - queue_delayed_work(callback_wq, &cb->cb_work, - msecs_to_jiffies(msecs)); + return queue_work(callback_wq, &cb->cb_work); } static void nfsd41_cb_inflight_begin(struct nfs4_client *clp) @@ -1490,7 +1482,7 @@ static void nfsd4_run_cb_work(struct work_struct *work) { struct nfsd4_callback *cb = - container_of(work, struct nfsd4_callback, cb_work.work); + container_of(work, struct nfsd4_callback, cb_work); struct nfs4_client *clp = cb->cb_clp; struct rpc_clnt *clnt; int flags; @@ -1502,16 +1494,8 @@ nfsd4_run_cb_work(struct work_struct *work) clnt = clp->cl_cb_client; if (!clnt) { - if (test_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags)) - nfsd41_destroy_cb(cb); - else { - /* - * XXX: Ideally, we could wait for the client to - * reconnect, but I haven't figured out how - * to do that yet. 
- */ - nfsd4_queue_cb_delayed(cb, 25); - } + /* Callback channel broken, or client killed; give up: */ + nfsd41_destroy_cb(cb); return; } @@ -1544,7 +1528,7 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp, cb->cb_msg.rpc_argp = cb; cb->cb_msg.rpc_resp = cb; cb->cb_ops = ops; - INIT_DELAYED_WORK(&cb->cb_work, nfsd4_run_cb_work); + INIT_WORK(&cb->cb_work, nfsd4_run_cb_work); cb->cb_status = 0; cb->cb_need_restart = false; cb->cb_holds_slot = false; diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 01c6f3445646..2ed0fcf879fd 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -68,7 +68,7 @@ struct nfsd4_callback { struct nfs4_client *cb_clp; struct rpc_message cb_msg; const struct nfsd4_callback_ops *cb_ops; - struct delayed_work cb_work; + struct work_struct cb_work; int cb_seq_status; int cb_status; bool cb_need_restart; diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c index bc846b904b68..aee40db7a036 100644 --- a/fs/nilfs2/dir.c +++ b/fs/nilfs2/dir.c @@ -240,7 +240,7 @@ nilfs_filetype_table[NILFS_FT_MAX] = { #define S_SHIFT 12 static unsigned char -nilfs_type_by_mode[S_IFMT >> S_SHIFT] = { +nilfs_type_by_mode[(S_IFMT >> S_SHIFT) + 1] = { [S_IFREG >> S_SHIFT] = NILFS_FT_REG_FILE, [S_IFDIR >> S_SHIFT] = NILFS_FT_DIR, [S_IFCHR >> S_SHIFT] = NILFS_FT_CHRDEV, diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c index d41eedbff674..39277c37185c 100644 --- a/fs/smb/client/cifsfs.c +++ b/fs/smb/client/cifsfs.c @@ -389,6 +389,7 @@ cifs_alloc_inode(struct super_block *sb) * server, can not assume caching of file data or metadata. */ cifs_set_oplock_level(cifs_inode, 0); + cifs_inode->lease_granted = false; cifs_inode->flags = 0; spin_lock_init(&cifs_inode->writers_lock); cifs_inode->writers = 0; @@ -739,6 +740,8 @@ static void cifs_umount_begin(struct super_block *sb) spin_lock(&cifs_tcp_ses_lock); spin_lock(&tcon->tc_lock); + trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count, + netfs_trace_tcon_ref_see_umount); if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) { /* we have other mounts to same share or we have already tried to umount this and woken up diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h index d6669ce4ae87..6ff35570db81 100644 --- a/fs/smb/client/cifsglob.h +++ b/fs/smb/client/cifsglob.h @@ -1190,6 +1190,7 @@ struct cifs_fattr { */ struct cifs_tcon { struct list_head tcon_list; + int debug_id; /* Debugging for tracing */ int tc_count; struct list_head rlist; /* reconnect list */ spinlock_t tc_lock; /* protect anything here that is not protected */ @@ -1276,7 +1277,9 @@ struct cifs_tcon { __u32 max_cached_dirs; #ifdef CONFIG_CIFS_FSCACHE u64 resource_id; /* server resource id */ + bool fscache_acquired; /* T if we've tried acquiring a cookie */ struct fscache_volume *fscache; /* cookie for share */ + struct mutex fscache_lock; /* Prevent regetting a cookie */ #endif struct list_head pending_opens; /* list of incomplete opens */ struct cached_fids *cfids; diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h index 8e0a348f1f66..fbc358c09da3 100644 --- a/fs/smb/client/cifsproto.h +++ b/fs/smb/client/cifsproto.h @@ -303,7 +303,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx, struct TCP_Server_Info *primary_server); extern void cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect); -extern void cifs_put_tcon(struct cifs_tcon *tcon); +extern void cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace); extern void cifs_release_automount_timer(void); @@ -530,8 +530,9 @@ extern int 
CIFSSMBLogoff(const unsigned int xid, struct cifs_ses *ses); extern struct cifs_ses *sesInfoAlloc(void); extern void sesInfoFree(struct cifs_ses *); -extern struct cifs_tcon *tcon_info_alloc(bool dir_leases_enabled); -extern void tconInfoFree(struct cifs_tcon *); +extern struct cifs_tcon *tcon_info_alloc(bool dir_leases_enabled, + enum smb3_tcon_ref_trace trace); +extern void tconInfoFree(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace); extern int cifs_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server, __u32 *pexpected_response_sequence_number); @@ -721,8 +722,6 @@ static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options) return options; } -struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon); -void cifs_put_tcon_super(struct super_block *sb); int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry); /* Put references of @ses and its children */ diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c index 4e35970681bf..7a16e12f5da8 100644 --- a/fs/smb/client/connect.c +++ b/fs/smb/client/connect.c @@ -1943,7 +1943,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx) } /* no need to setup directory caching on IPC share, so pass in false */ - tcon = tcon_info_alloc(false); + tcon = tcon_info_alloc(false, netfs_trace_tcon_ref_new_ipc); if (tcon == NULL) return -ENOMEM; @@ -1960,7 +1960,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx) if (rc) { cifs_server_dbg(VFS, "failed to connect to IPC (rc=%d)\n", rc); - tconInfoFree(tcon); + tconInfoFree(tcon, netfs_trace_tcon_ref_free_ipc_fail); goto out; } @@ -2043,7 +2043,7 @@ void __cifs_put_smb_ses(struct cifs_ses *ses) * files on session close, as specified in MS-SMB2 3.3.5.6 Receiving an * SMB2 LOGOFF Request. 
*/ - tconInfoFree(tcon); + tconInfoFree(tcon, netfs_trace_tcon_ref_free_ipc); if (do_logoff) { xid = get_xid(); rc = server->ops->logoff(xid, ses); @@ -2432,6 +2432,8 @@ cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx) continue; } ++tcon->tc_count; + trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count, + netfs_trace_tcon_ref_get_find); spin_unlock(&tcon->tc_lock); spin_unlock(&cifs_tcp_ses_lock); return tcon; @@ -2441,7 +2443,7 @@ cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx) } void -cifs_put_tcon(struct cifs_tcon *tcon) +cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace) { unsigned int xid; struct cifs_ses *ses; @@ -2457,6 +2459,7 @@ cifs_put_tcon(struct cifs_tcon *tcon) cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count); spin_lock(&cifs_tcp_ses_lock); spin_lock(&tcon->tc_lock); + trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count - 1, trace); if (--tcon->tc_count > 0) { spin_unlock(&tcon->tc_lock); spin_unlock(&cifs_tcp_ses_lock); @@ -2493,7 +2496,7 @@ cifs_put_tcon(struct cifs_tcon *tcon) _free_xid(xid); cifs_fscache_release_super_cookie(tcon); - tconInfoFree(tcon); + tconInfoFree(tcon, netfs_trace_tcon_ref_free); cifs_put_smb_ses(ses); } @@ -2547,7 +2550,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx) nohandlecache = ctx->nohandlecache; else nohandlecache = true; - tcon = tcon_info_alloc(!nohandlecache); + tcon = tcon_info_alloc(!nohandlecache, netfs_trace_tcon_ref_new); if (tcon == NULL) { rc = -ENOMEM; goto out_fail; @@ -2737,7 +2740,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx) return tcon; out_fail: - tconInfoFree(tcon); + tconInfoFree(tcon, netfs_trace_tcon_ref_free_fail); return ERR_PTR(rc); } @@ -2754,7 +2757,7 @@ cifs_put_tlink(struct tcon_link *tlink) } if (!IS_ERR(tlink_tcon(tlink))) - cifs_put_tcon(tlink_tcon(tlink)); + cifs_put_tcon(tlink_tcon(tlink), netfs_trace_tcon_ref_put_tlink); kfree(tlink); } @@ -3319,7 +3322,7 @@ void cifs_mount_put_conns(struct cifs_mount_ctx *mnt_ctx) int rc = 0; if (mnt_ctx->tcon) - cifs_put_tcon(mnt_ctx->tcon); + cifs_put_tcon(mnt_ctx->tcon, netfs_trace_tcon_ref_put_mnt_ctx); else if (mnt_ctx->ses) cifs_put_smb_ses(mnt_ctx->ses); else if (mnt_ctx->server) diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c index 6c727d8c31e8..3bbac925d076 100644 --- a/fs/smb/client/fs_context.c +++ b/fs/smb/client/fs_context.c @@ -748,6 +748,16 @@ static int smb3_fs_context_validate(struct fs_context *fc) /* set the port that we got earlier */ cifs_set_port((struct sockaddr *)&ctx->dstaddr, ctx->port); + if (ctx->uid_specified && !ctx->forceuid_specified) { + ctx->override_uid = 1; + pr_notice("enabling forceuid mount option implicitly because uid= option is specified\n"); + } + + if (ctx->gid_specified && !ctx->forcegid_specified) { + ctx->override_gid = 1; + pr_notice("enabling forcegid mount option implicitly because gid= option is specified\n"); + } + if (ctx->override_uid && !ctx->uid_specified) { ctx->override_uid = 0; pr_notice("ignoring forceuid mount option specified with no uid= option\n"); @@ -1019,12 +1029,14 @@ static int smb3_fs_context_parse_param(struct fs_context *fc, ctx->override_uid = 0; else ctx->override_uid = 1; + ctx->forceuid_specified = true; break; case Opt_forcegid: if (result.negated) ctx->override_gid = 0; else ctx->override_gid = 1; + ctx->forcegid_specified = true; break; case Opt_perm: if (result.negated) diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h index 
a947bddeba27..cf577ec0dd0a 100644 --- a/fs/smb/client/fs_context.h +++ b/fs/smb/client/fs_context.h @@ -165,6 +165,8 @@ enum cifs_param { }; struct smb3_fs_context { + bool forceuid_specified; + bool forcegid_specified; bool uid_specified; bool cruid_specified; bool gid_specified; diff --git a/fs/smb/client/fscache.c b/fs/smb/client/fscache.c index 340efce8f052..1a895e6243ee 100644 --- a/fs/smb/client/fscache.c +++ b/fs/smb/client/fscache.c @@ -43,12 +43,23 @@ int cifs_fscache_get_super_cookie(struct cifs_tcon *tcon) char *key; int ret = -ENOMEM; + if (tcon->fscache_acquired) + return 0; + + mutex_lock(&tcon->fscache_lock); + if (tcon->fscache_acquired) { + mutex_unlock(&tcon->fscache_lock); + return 0; + } + tcon->fscache_acquired = true; + tcon->fscache = NULL; switch (sa->sa_family) { case AF_INET: case AF_INET6: break; default: + mutex_unlock(&tcon->fscache_lock); cifs_dbg(VFS, "Unknown network family '%d'\n", sa->sa_family); return -EINVAL; } @@ -57,6 +68,7 @@ int cifs_fscache_get_super_cookie(struct cifs_tcon *tcon) sharename = extract_sharename(tcon->tree_name); if (IS_ERR(sharename)) { + mutex_unlock(&tcon->fscache_lock); cifs_dbg(FYI, "%s: couldn't extract sharename\n", __func__); return PTR_ERR(sharename); } @@ -82,6 +94,11 @@ int cifs_fscache_get_super_cookie(struct cifs_tcon *tcon) } pr_err("Cache volume key already in use (%s)\n", key); vcookie = NULL; + trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count, + netfs_trace_tcon_ref_see_fscache_collision); + } else { + trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count, + netfs_trace_tcon_ref_see_fscache_okay); } tcon->fscache = vcookie; @@ -90,6 +107,7 @@ out_2: kfree(key); out: kfree(sharename); + mutex_unlock(&tcon->fscache_lock); return ret; } @@ -102,6 +120,8 @@ void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon) cifs_fscache_fill_volume_coherency(tcon, &cd); fscache_relinquish_volume(tcon->fscache, &cd, false); tcon->fscache = NULL; + trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count, + netfs_trace_tcon_ref_see_fscache_relinq); } void cifs_fscache_get_inode_cookie(struct inode *inode) diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c index 7d15a1969b81..07c468ddb88a 100644 --- a/fs/smb/client/misc.c +++ b/fs/smb/client/misc.c @@ -111,9 +111,10 @@ sesInfoFree(struct cifs_ses *buf_to_free) } struct cifs_tcon * -tcon_info_alloc(bool dir_leases_enabled) +tcon_info_alloc(bool dir_leases_enabled, enum smb3_tcon_ref_trace trace) { struct cifs_tcon *ret_buf; + static atomic_t tcon_debug_id; ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL); if (!ret_buf) @@ -130,7 +131,8 @@ tcon_info_alloc(bool dir_leases_enabled) atomic_inc(&tconInfoAllocCount); ret_buf->status = TID_NEW; - ++ret_buf->tc_count; + ret_buf->debug_id = atomic_inc_return(&tcon_debug_id); + ret_buf->tc_count = 1; spin_lock_init(&ret_buf->tc_lock); INIT_LIST_HEAD(&ret_buf->openFileList); INIT_LIST_HEAD(&ret_buf->tcon_list); @@ -139,17 +141,22 @@ tcon_info_alloc(bool dir_leases_enabled) atomic_set(&ret_buf->num_local_opens, 0); atomic_set(&ret_buf->num_remote_opens, 0); ret_buf->stats_from_time = ktime_get_real_seconds(); +#ifdef CONFIG_CIFS_FSCACHE + mutex_init(&ret_buf->fscache_lock); +#endif + trace_smb3_tcon_ref(ret_buf->debug_id, ret_buf->tc_count, trace); return ret_buf; } void -tconInfoFree(struct cifs_tcon *tcon) +tconInfoFree(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace) { if (tcon == NULL) { cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n"); return; } + trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count, trace); 
free_cached_dirs(tcon->cfids); atomic_dec(&tconInfoAllocCount); kfree(tcon->nativeFileSystem); diff --git a/fs/smb/client/smb2misc.c b/fs/smb/client/smb2misc.c index cc72be5a93a9..677ef6f99a5b 100644 --- a/fs/smb/client/smb2misc.c +++ b/fs/smb/client/smb2misc.c @@ -767,7 +767,7 @@ smb2_cancelled_close_fid(struct work_struct *work) if (rc) cifs_tcon_dbg(VFS, "Close cancelled mid failed rc:%d\n", rc); - cifs_put_tcon(tcon); + cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cancelled_close_fid); kfree(cancelled); } @@ -811,6 +811,8 @@ smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid, if (tcon->tc_count <= 0) { struct TCP_Server_Info *server = NULL; + trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count, + netfs_trace_tcon_ref_see_cancelled_close); WARN_ONCE(tcon->tc_count < 0, "tcon refcount is negative"); spin_unlock(&cifs_tcp_ses_lock); @@ -823,12 +825,14 @@ smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid, return 0; } tcon->tc_count++; + trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count, + netfs_trace_tcon_ref_get_cancelled_close); spin_unlock(&cifs_tcp_ses_lock); rc = __smb2_handle_cancelled_cmd(tcon, SMB2_CLOSE_HE, 0, persistent_fid, volatile_fid); if (rc) - cifs_put_tcon(tcon); + cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cancelled_close); return rc; } @@ -856,7 +860,7 @@ smb2_handle_cancelled_mid(struct mid_q_entry *mid, struct TCP_Server_Info *serve rsp->PersistentFileId, rsp->VolatileFileId); if (rc) - cifs_put_tcon(tcon); + cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cancelled_mid); return rc; } diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c index 78c94d0350fe..28f0b7d19d53 100644 --- a/fs/smb/client/smb2ops.c +++ b/fs/smb/client/smb2ops.c @@ -2915,8 +2915,11 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses, tcon = list_first_entry_or_null(&ses->tcon_list, struct cifs_tcon, tcon_list); - if (tcon) + if (tcon) { tcon->tc_count++; + trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count, + netfs_trace_tcon_ref_get_dfs_refer); + } spin_unlock(&cifs_tcp_ses_lock); } @@ -2980,6 +2983,8 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses, /* ipc tcons are not refcounted */ spin_lock(&cifs_tcp_ses_lock); tcon->tc_count--; + trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count, + netfs_trace_tcon_ref_dec_dfs_refer); /* tc_count can never go negative */ WARN_ON(tcon->tc_count < 0); spin_unlock(&cifs_tcp_ses_lock); diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c index 86c647a947cc..a5efce03cb58 100644 --- a/fs/smb/client/smb2pdu.c +++ b/fs/smb/client/smb2pdu.c @@ -4138,6 +4138,8 @@ void smb2_reconnect_server(struct work_struct *work) list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { if (tcon->need_reconnect || tcon->need_reopen_files) { tcon->tc_count++; + trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count, + netfs_trace_tcon_ref_get_reconnect_server); list_add_tail(&tcon->rlist, &tmp_list); tcon_selected = true; } @@ -4176,14 +4178,14 @@ void smb2_reconnect_server(struct work_struct *work) if (tcon->ipc) cifs_put_smb_ses(tcon->ses); else - cifs_put_tcon(tcon); + cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_reconnect_server); } if (!ses_exist) goto done; /* allocate a dummy tcon struct used for reconnect */ - tcon = tcon_info_alloc(false); + tcon = tcon_info_alloc(false, netfs_trace_tcon_ref_new_reconnect_server); if (!tcon) { resched = true; list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) { @@ -4206,7 +4208,7 @@ void smb2_reconnect_server(struct work_struct *work) 
list_del_init(&ses->rlist); cifs_put_smb_ses(ses); } - tconInfoFree(tcon); + tconInfoFree(tcon, netfs_trace_tcon_ref_free_reconnect_server); done: cifs_dbg(FYI, "Reconnecting tcons and channels finished\n"); diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c index 1d6e54f7879e..02135a605305 100644 --- a/fs/smb/client/smb2transport.c +++ b/fs/smb/client/smb2transport.c @@ -189,6 +189,8 @@ smb2_find_smb_sess_tcon_unlocked(struct cifs_ses *ses, __u32 tid) if (tcon->tid != tid) continue; ++tcon->tc_count; + trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count, + netfs_trace_tcon_ref_get_find_sess_tcon); return tcon; } diff --git a/fs/smb/client/trace.h b/fs/smb/client/trace.h index 5e83cb9da902..604e52876cd2 100644 --- a/fs/smb/client/trace.h +++ b/fs/smb/client/trace.h @@ -3,6 +3,9 @@ * Copyright (C) 2018, Microsoft Corporation. * * Author(s): Steve French <stfrench@microsoft.com> + * + * Please use this 3-part article as a reference for writing new tracepoints: + * https://lwn.net/Articles/379903/ */ #undef TRACE_SYSTEM #define TRACE_SYSTEM cifs @@ -15,9 +18,70 @@ #include <linux/inet.h> /* - * Please use this 3-part article as a reference for writing new tracepoints: - * https://lwn.net/Articles/379903/ + * Specify enums for tracing information. + */ +#define smb3_tcon_ref_traces \ + EM(netfs_trace_tcon_ref_dec_dfs_refer, "DEC DfsRef") \ + EM(netfs_trace_tcon_ref_free, "FRE ") \ + EM(netfs_trace_tcon_ref_free_fail, "FRE Fail ") \ + EM(netfs_trace_tcon_ref_free_ipc, "FRE Ipc ") \ + EM(netfs_trace_tcon_ref_free_ipc_fail, "FRE Ipc-F ") \ + EM(netfs_trace_tcon_ref_free_reconnect_server, "FRE Reconn") \ + EM(netfs_trace_tcon_ref_get_cancelled_close, "GET Cn-Cls") \ + EM(netfs_trace_tcon_ref_get_dfs_refer, "GET DfsRef") \ + EM(netfs_trace_tcon_ref_get_find, "GET Find ") \ + EM(netfs_trace_tcon_ref_get_find_sess_tcon, "GET FndSes") \ + EM(netfs_trace_tcon_ref_get_reconnect_server, "GET Reconn") \ + EM(netfs_trace_tcon_ref_new, "NEW ") \ + EM(netfs_trace_tcon_ref_new_ipc, "NEW Ipc ") \ + EM(netfs_trace_tcon_ref_new_reconnect_server, "NEW Reconn") \ + EM(netfs_trace_tcon_ref_put_cancelled_close, "PUT Cn-Cls") \ + EM(netfs_trace_tcon_ref_put_cancelled_close_fid, "PUT Cn-Fid") \ + EM(netfs_trace_tcon_ref_put_cancelled_mid, "PUT Cn-Mid") \ + EM(netfs_trace_tcon_ref_put_mnt_ctx, "PUT MntCtx") \ + EM(netfs_trace_tcon_ref_put_reconnect_server, "PUT Reconn") \ + EM(netfs_trace_tcon_ref_put_tlink, "PUT Tlink ") \ + EM(netfs_trace_tcon_ref_see_cancelled_close, "SEE Cn-Cls") \ + EM(netfs_trace_tcon_ref_see_fscache_collision, "SEE FV-CO!") \ + EM(netfs_trace_tcon_ref_see_fscache_okay, "SEE FV-Ok ") \ + EM(netfs_trace_tcon_ref_see_fscache_relinq, "SEE FV-Rlq") \ + E_(netfs_trace_tcon_ref_see_umount, "SEE Umount") + +#undef EM +#undef E_ + +/* + * Define those tracing enums. + */ +#ifndef __SMB3_DECLARE_TRACE_ENUMS_ONCE_ONLY +#define __SMB3_DECLARE_TRACE_ENUMS_ONCE_ONLY + +#define EM(a, b) a, +#define E_(a, b) a + +enum smb3_tcon_ref_trace { smb3_tcon_ref_traces } __mode(byte); + +#undef EM +#undef E_ +#endif + +/* + * Export enum symbols via userspace. + */ +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define E_(a, b) TRACE_DEFINE_ENUM(a); + +smb3_tcon_ref_traces; + +#undef EM +#undef E_ + +/* + * Now redefine the EM() and E_() macros to map the enums to the strings that + * will be printed in the output. 
*/ +#define EM(a, b) { a, b }, +#define E_(a, b) { a, b } /* For logging errors in read or write */ DECLARE_EVENT_CLASS(smb3_rw_err_class, @@ -1125,6 +1189,30 @@ DEFINE_SMB3_CREDIT_EVENT(waitff_credits); DEFINE_SMB3_CREDIT_EVENT(overflow_credits); DEFINE_SMB3_CREDIT_EVENT(set_credits); + +TRACE_EVENT(smb3_tcon_ref, + TP_PROTO(unsigned int tcon_debug_id, int ref, + enum smb3_tcon_ref_trace trace), + TP_ARGS(tcon_debug_id, ref, trace), + TP_STRUCT__entry( + __field(unsigned int, tcon) + __field(int, ref) + __field(enum smb3_tcon_ref_trace, trace) + ), + TP_fast_assign( + __entry->tcon = tcon_debug_id; + __entry->ref = ref; + __entry->trace = trace; + ), + TP_printk("TC=%08x %s r=%u", + __entry->tcon, + __print_symbolic(__entry->trace, smb3_tcon_ref_traces), + __entry->ref) + ); + + +#undef EM +#undef E_ #endif /* _CIFS_TRACE_H */ #undef TRACE_INCLUDE_PATH diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h index 1b594307c9d5..202ff9128156 100644 --- a/fs/smb/common/smb2pdu.h +++ b/fs/smb/common/smb2pdu.h @@ -711,7 +711,7 @@ struct smb2_close_rsp { __le16 StructureSize; /* 60 */ __le16 Flags; __le32 Reserved; - struct_group(network_open_info, + struct_group_attr(network_open_info, __packed, __le64 CreationTime; __le64 LastAccessTime; __le64 LastWriteTime; diff --git a/fs/smb/server/ksmbd_netlink.h b/fs/smb/server/ksmbd_netlink.h index 686b321c5a8b..f4e55199938d 100644 --- a/fs/smb/server/ksmbd_netlink.h +++ b/fs/smb/server/ksmbd_netlink.h @@ -340,23 +340,24 @@ enum KSMBD_TREE_CONN_STATUS { /* * Share config flags. */ -#define KSMBD_SHARE_FLAG_INVALID (0) -#define KSMBD_SHARE_FLAG_AVAILABLE BIT(0) -#define KSMBD_SHARE_FLAG_BROWSEABLE BIT(1) -#define KSMBD_SHARE_FLAG_WRITEABLE BIT(2) -#define KSMBD_SHARE_FLAG_READONLY BIT(3) -#define KSMBD_SHARE_FLAG_GUEST_OK BIT(4) -#define KSMBD_SHARE_FLAG_GUEST_ONLY BIT(5) -#define KSMBD_SHARE_FLAG_STORE_DOS_ATTRS BIT(6) -#define KSMBD_SHARE_FLAG_OPLOCKS BIT(7) -#define KSMBD_SHARE_FLAG_PIPE BIT(8) -#define KSMBD_SHARE_FLAG_HIDE_DOT_FILES BIT(9) -#define KSMBD_SHARE_FLAG_INHERIT_OWNER BIT(10) -#define KSMBD_SHARE_FLAG_STREAMS BIT(11) -#define KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS BIT(12) -#define KSMBD_SHARE_FLAG_ACL_XATTR BIT(13) -#define KSMBD_SHARE_FLAG_UPDATE BIT(14) -#define KSMBD_SHARE_FLAG_CROSSMNT BIT(15) +#define KSMBD_SHARE_FLAG_INVALID (0) +#define KSMBD_SHARE_FLAG_AVAILABLE BIT(0) +#define KSMBD_SHARE_FLAG_BROWSEABLE BIT(1) +#define KSMBD_SHARE_FLAG_WRITEABLE BIT(2) +#define KSMBD_SHARE_FLAG_READONLY BIT(3) +#define KSMBD_SHARE_FLAG_GUEST_OK BIT(4) +#define KSMBD_SHARE_FLAG_GUEST_ONLY BIT(5) +#define KSMBD_SHARE_FLAG_STORE_DOS_ATTRS BIT(6) +#define KSMBD_SHARE_FLAG_OPLOCKS BIT(7) +#define KSMBD_SHARE_FLAG_PIPE BIT(8) +#define KSMBD_SHARE_FLAG_HIDE_DOT_FILES BIT(9) +#define KSMBD_SHARE_FLAG_INHERIT_OWNER BIT(10) +#define KSMBD_SHARE_FLAG_STREAMS BIT(11) +#define KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS BIT(12) +#define KSMBD_SHARE_FLAG_ACL_XATTR BIT(13) +#define KSMBD_SHARE_FLAG_UPDATE BIT(14) +#define KSMBD_SHARE_FLAG_CROSSMNT BIT(15) +#define KSMBD_SHARE_FLAG_CONTINUOUS_AVAILABILITY BIT(16) /* * Tree connect request flags. 
diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c index c0788188aa82..c67fbc8d6683 100644 --- a/fs/smb/server/server.c +++ b/fs/smb/server/server.c @@ -167,20 +167,17 @@ static void __handle_ksmbd_work(struct ksmbd_work *work, int rc; bool is_chained = false; - if (conn->ops->allocate_rsp_buf(work)) - return; - if (conn->ops->is_transform_hdr && conn->ops->is_transform_hdr(work->request_buf)) { rc = conn->ops->decrypt_req(work); - if (rc < 0) { - conn->ops->set_rsp_status(work, STATUS_DATA_ERROR); - goto send; - } - + if (rc < 0) + return; work->encrypted = true; } + if (conn->ops->allocate_rsp_buf(work)) + return; + rc = conn->ops->init_rsp_hdr(work); if (rc) { /* either uid or tid is not correct */ diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index 5723bbf372d7..355824151c2d 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -535,6 +535,10 @@ int smb2_allocate_rsp_buf(struct ksmbd_work *work) if (cmd == SMB2_QUERY_INFO_HE) { struct smb2_query_info_req *req; + if (get_rfc1002_len(work->request_buf) < + offsetof(struct smb2_query_info_req, OutputBufferLength)) + return -EINVAL; + req = smb2_get_msg(work->request_buf); if ((req->InfoType == SMB2_O_INFO_FILE && (req->FileInfoClass == FILE_FULL_EA_INFORMATION || @@ -1984,7 +1988,12 @@ int smb2_tree_connect(struct ksmbd_work *work) write_unlock(&sess->tree_conns_lock); rsp->StructureSize = cpu_to_le16(16); out_err1: - rsp->Capabilities = 0; + if (server_conf.flags & KSMBD_GLOBAL_FLAG_DURABLE_HANDLE && + test_share_config_flag(share, + KSMBD_SHARE_FLAG_CONTINUOUS_AVAILABILITY)) + rsp->Capabilities = SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY; + else + rsp->Capabilities = 0; rsp->Reserved = 0; /* default manual caching */ rsp->ShareFlags = SMB2_SHAREFLAG_MANUAL_CACHING; @@ -3498,7 +3507,9 @@ int smb2_open(struct ksmbd_work *work) memcpy(fp->client_guid, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE); if (dh_info.type == DURABLE_REQ_V2 || dh_info.type == DURABLE_REQ) { - if (dh_info.type == DURABLE_REQ_V2 && dh_info.persistent) + if (dh_info.type == DURABLE_REQ_V2 && dh_info.persistent && + test_share_config_flag(work->tcon->share_conf, + KSMBD_SHARE_FLAG_CONTINUOUS_AVAILABILITY)) fp->is_persistent = true; else fp->is_durable = true; diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c index 22f0f3db3ac9..51b1b0bed616 100644 --- a/fs/smb/server/vfs.c +++ b/fs/smb/server/vfs.c @@ -754,10 +754,15 @@ retry: goto out4; } + /* + * explicitly handle file overwrite case, for compatibility with + * filesystems that may not support rename flags (e.g: fuse) + */ if ((flags & RENAME_NOREPLACE) && d_is_positive(new_dentry)) { err = -EEXIST; goto out4; } + flags &= ~(RENAME_NOREPLACE); if (old_child == trap) { err = -EINVAL; diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c index aa3411354e66..16bd693d0b3a 100644 --- a/fs/squashfs/inode.c +++ b/fs/squashfs/inode.c @@ -48,6 +48,10 @@ static int squashfs_new_inode(struct super_block *sb, struct inode *inode, gid_t i_gid; int err; + inode->i_ino = le32_to_cpu(sqsh_ino->inode_number); + if (inode->i_ino == 0) + return -EINVAL; + err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->uid), &i_uid); if (err) return err; @@ -58,7 +62,6 @@ static int squashfs_new_inode(struct super_block *sb, struct inode *inode, i_uid_write(inode, i_uid); i_gid_write(inode, i_gid); - inode->i_ino = le32_to_cpu(sqsh_ino->inode_number); inode_set_mtime(inode, le32_to_cpu(sqsh_ino->mtime), 0); inode_set_atime(inode, inode_get_mtime_sec(inode), 0); inode_set_ctime(inode, inode_get_mtime_sec(inode), 
0); diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index 6b7652fb8050..7cd64021d453 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -463,6 +463,8 @@ struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj, kn = kernfs_find_and_get(kobj->sd, attr->name); if (kn) kernfs_break_active_protection(kn); + else + kobject_put(kobj); return kn; } EXPORT_SYMBOL_GPL(sysfs_break_active_protection); |