Diffstat (limited to 'fs/ntfs3/file.c')
-rw-r--r-- | fs/ntfs3/file.c | 87
1 file changed, 59 insertions, 28 deletions
diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
index 34ed242e1063..c1ece707b195 100644
--- a/fs/ntfs3/file.c
+++ b/fs/ntfs3/file.c
@@ -57,6 +57,10 @@ long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
 	struct inode *inode = file_inode(filp);
 	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
 
+	/* Avoid any operation if inode is bad. */
+	if (unlikely(is_bad_ni(ntfs_i(inode))))
+		return -EINVAL;
+
 	switch (cmd) {
 	case FITRIM:
 		return ntfs_ioctl_fitrim(sbi, arg);
@@ -81,6 +85,10 @@ int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path,
 	struct inode *inode = d_inode(path->dentry);
 	struct ntfs_inode *ni = ntfs_i(inode);
 
+	/* Avoid any operation if inode is bad. */
+	if (unlikely(is_bad_ni(ni)))
+		return -EINVAL;
+
 	stat->result_mask |= STATX_BTIME;
 	stat->btime = ni->i_crtime;
 	stat->blksize = ni->mi.sbi->cluster_size; /* 512, 1K, ..., 2M */
@@ -154,13 +162,13 @@ static int ntfs_extend_initialized_size(struct file *file,
 		if (pos + len > new_valid)
 			len = new_valid - pos;
 
-		err = ntfs_write_begin(file, mapping, pos, len, &folio, NULL);
+		err = ntfs_write_begin(NULL, mapping, pos, len, &folio, NULL);
 		if (err)
 			goto out;
 
 		folio_zero_range(folio, zerofrom, folio_size(folio) - zerofrom);
 
-		err = ntfs_write_end(file, mapping, pos, len, len, folio, NULL);
+		err = ntfs_write_end(NULL, mapping, pos, len, len, folio, NULL);
 		if (err < 0)
 			goto out;
 		pos += len;
@@ -261,16 +269,21 @@ out:
 }
 
 /*
- * ntfs_file_mmap - file_operations::mmap
+ * ntfs_file_mmap_prepare - file_operations::mmap_prepare
  */
-static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+static int ntfs_file_mmap_prepare(struct vm_area_desc *desc)
 {
+	struct file *file = desc->file;
 	struct inode *inode = file_inode(file);
 	struct ntfs_inode *ni = ntfs_i(inode);
-	u64 from = ((u64)vma->vm_pgoff << PAGE_SHIFT);
-	bool rw = vma->vm_flags & VM_WRITE;
+	u64 from = ((u64)desc->pgoff << PAGE_SHIFT);
+	bool rw = desc->vm_flags & VM_WRITE;
 	int err;
 
+	/* Avoid any operation if inode is bad. */
+	if (unlikely(is_bad_ni(ni)))
+		return -EINVAL;
+
 	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
 		return -EIO;
 
@@ -291,7 +304,7 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 
 	if (rw) {
 		u64 to = min_t(loff_t, i_size_read(inode),
-			       from + vma->vm_end - vma->vm_start);
+			       from + desc->end - desc->start);
 
 		if (is_sparsed(ni)) {
 			/* Allocate clusters for rw map. */
@@ -310,7 +323,10 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 		}
 
 		if (ni->i_valid < to) {
-			inode_lock(inode);
+			if (!inode_trylock(inode)) {
+				err = -EAGAIN;
+				goto out;
+			}
 			err = ntfs_extend_initialized_size(file, ni,
 							   ni->i_valid, to);
 			inode_unlock(inode);
@@ -319,7 +335,7 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 		}
 	}
 
-	err = generic_file_mmap(file, vma);
+	err = generic_file_mmap_prepare(desc);
 out:
 	return err;
 }
@@ -735,6 +751,10 @@ int ntfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
 	umode_t mode = inode->i_mode;
 	int err;
 
+	/* Avoid any operation if inode is bad. */
+	if (unlikely(is_bad_ni(ni)))
+		return -EINVAL;
+
 	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
 		return -EIO;
 
@@ -795,6 +815,10 @@ static int check_read_restriction(struct inode *inode)
 {
 	struct ntfs_inode *ni = ntfs_i(inode);
 
+	/* Avoid any operation if inode is bad. */
+	if (unlikely(is_bad_ni(ni)))
+		return -EINVAL;
+
 	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
 		return -EIO;
 
@@ -913,7 +937,8 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
 	struct ntfs_inode *ni = ntfs_i(inode);
 	u64 valid = ni->i_valid;
 	struct ntfs_sb_info *sbi = ni->mi.sbi;
-	struct page *page, **pages = NULL;
+	struct page **pages = NULL;
+	struct folio *folio;
 	size_t written = 0;
 	u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
 	u32 frame_size = 1u << frame_bits;
@@ -923,7 +948,6 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
 	u64 frame_vbo;
 	pgoff_t index;
 	bool frame_uptodate;
-	struct folio *folio;
 
 	if (frame_size < PAGE_SIZE) {
 		/*
@@ -977,8 +1001,7 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
 					    pages_per_frame);
 			if (err) {
 				for (ip = 0; ip < pages_per_frame; ip++) {
-					page = pages[ip];
-					folio = page_folio(page);
+					folio = page_folio(pages[ip]);
 					folio_unlock(folio);
 					folio_put(folio);
 				}
@@ -989,10 +1012,9 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
 			ip = off >> PAGE_SHIFT;
 			off = offset_in_page(valid);
 			for (; ip < pages_per_frame; ip++, off = 0) {
-				page = pages[ip];
-				folio = page_folio(page);
-				zero_user_segment(page, off, PAGE_SIZE);
-				flush_dcache_page(page);
+				folio = page_folio(pages[ip]);
+				folio_zero_segment(folio, off, PAGE_SIZE);
+				flush_dcache_folio(folio);
 				folio_mark_uptodate(folio);
 			}
 
@@ -1001,8 +1023,7 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
 			ni_unlock(ni);
 
 			for (ip = 0; ip < pages_per_frame; ip++) {
-				page = pages[ip];
-				folio = page_folio(page);
+				folio = page_folio(pages[ip]);
 				folio_mark_uptodate(folio);
 				folio_unlock(folio);
 				folio_put(folio);
@@ -1046,8 +1067,7 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
 
 			if (err) {
 				for (ip = 0; ip < pages_per_frame; ip++) {
-					page = pages[ip];
-					folio = page_folio(page);
+					folio = page_folio(pages[ip]);
 					folio_unlock(folio);
 					folio_put(folio);
 				}
@@ -1065,10 +1085,10 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
 		for (;;) {
 			size_t cp, tail = PAGE_SIZE - off;
 
-			page = pages[ip];
-			cp = copy_page_from_iter_atomic(page, off,
+			folio = page_folio(pages[ip]);
+			cp = copy_folio_from_iter_atomic(folio, off,
 							min(tail, bytes), from);
-			flush_dcache_page(page);
+			flush_dcache_folio(folio);
 
 			copied += cp;
 			bytes -= cp;
@@ -1088,9 +1108,8 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
 		ni_unlock(ni);
 
 		for (ip = 0; ip < pages_per_frame; ip++) {
-			page = pages[ip];
-			ClearPageDirty(page);
-			folio = page_folio(page);
+			folio = page_folio(pages[ip]);
+			folio_clear_dirty(folio);
 			folio_mark_uptodate(folio);
 			folio_unlock(folio);
 			folio_put(folio);
@@ -1135,6 +1154,10 @@ static int check_write_restriction(struct inode *inode)
 {
 	struct ntfs_inode *ni = ntfs_i(inode);
 
+	/* Avoid any operation if inode is bad. */
+	if (unlikely(is_bad_ni(ni)))
+		return -EINVAL;
+
 	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
 		return -EIO;
 
@@ -1217,6 +1240,10 @@ int ntfs_file_open(struct inode *inode, struct file *file)
 {
 	struct ntfs_inode *ni = ntfs_i(inode);
 
+	/* Avoid any operation if inode is bad. */
+	if (unlikely(is_bad_ni(ni)))
+		return -EINVAL;
+
 	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
 		return -EIO;
 
@@ -1286,6 +1313,10 @@ int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	int err;
 	struct ntfs_inode *ni = ntfs_i(inode);
 
+	/* Avoid any operation if inode is bad. */
+	if (unlikely(is_bad_ni(ni)))
+		return -EINVAL;
+
 	err = fiemap_prep(inode, fieinfo, start, &len, ~FIEMAP_FLAG_XATTR);
 	if (err)
 		return err;
@@ -1336,7 +1367,7 @@ const struct file_operations ntfs_file_operations = {
 #endif
 	.splice_read	= ntfs_file_splice_read,
 	.splice_write	= ntfs_file_splice_write,
-	.mmap		= ntfs_file_mmap,
+	.mmap_prepare	= ntfs_file_mmap_prepare,
 	.open		= ntfs_file_open,
 	.fsync		= generic_file_fsync,
 	.fallocate	= ntfs_fallocate,
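For context, a minimal sketch (not part of this patch) of the .mmap to .mmap_prepare conversion pattern the hunks above follow. The myfs_* identifiers are hypothetical; only the vm_area_desc fields actually used in the diff (file, pgoff, start, end, vm_flags) and generic_file_mmap_prepare() are assumed.

/*
 * Illustrative sketch: the old hook received (struct file *file,
 * struct vm_area_struct *vma); the same information is now read from
 * the mapping descriptor before the VMA exists:
 *   desc->file        <- file
 *   desc->pgoff       <- vma->vm_pgoff
 *   desc->start/end   <- vma->vm_start / vma->vm_end
 *   desc->vm_flags    <- vma->vm_flags
 */
static int myfs_file_mmap_prepare(struct vm_area_desc *desc)
{
	if (desc->vm_flags & VM_WRITE) {
		/* prepare backing storage for a writable mapping here */
	}

	return generic_file_mmap_prepare(desc);	/* replaces generic_file_mmap(file, vma) */
}

static const struct file_operations myfs_file_operations = {
	/* ... */
	.mmap_prepare	= myfs_file_mmap_prepare,	/* replaces .mmap = myfs_file_mmap */
};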
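The ntfs_compress_write() hunks also drop the local struct page pointer in favor of folio-based helpers. A condensed sketch of that mapping, assuming a pages[] array of locked pages as in the function above:

	/* Sketch only: operate on the folio that backs pages[ip]
	 * instead of the raw struct page.
	 */
	struct folio *folio = page_folio(pages[ip]);	/* was: page = pages[ip]                 */

	folio_zero_segment(folio, off, PAGE_SIZE);	/* was: zero_user_segment(page, ...)     */
	flush_dcache_folio(folio);			/* was: flush_dcache_page(page)          */
	folio_clear_dirty(folio);			/* was: ClearPageDirty(page)             */
	folio_mark_uptodate(folio);
	folio_unlock(folio);
	folio_put(folio);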